path: root/drivers/pci/controller/dwc
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/pci/controller/dwc
parent     Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/pci/controller/dwc')
-rw-r--r--  drivers/pci/controller/dwc/Kconfig                   418
-rw-r--r--  drivers/pci/controller/dwc/Makefile                   49
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c              960
-rw-r--r--  drivers/pci/controller/dwc/pci-exynos.c              443
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c               1612
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c           1338
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape-ep.c       288
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape.c          272
-rw-r--r--  drivers/pci/controller/dwc/pci-meson.c               497
-rw-r--r--  drivers/pci/controller/dwc/pcie-al.c                 380
-rw-r--r--  drivers/pci/controller/dwc/pcie-armada8k.c           349
-rw-r--r--  drivers/pci/controller/dwc/pcie-artpec6.c            500
-rw-r--r--  drivers/pci/controller/dwc/pcie-bt1.c                645
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c      820
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c    880
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-plat.c    187
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c        1064
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h         645
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c        368
-rw-r--r--  drivers/pci/controller/dwc/pcie-fu740.c              357
-rw-r--r--  drivers/pci/controller/dwc/pcie-hisi.c               180
-rw-r--r--  drivers/pci/controller/dwc/pcie-histb.c              450
-rw-r--r--  drivers/pci/controller/dwc/pcie-intel-gw.c           453
-rw-r--r--  drivers/pci/controller/dwc/pcie-keembay.c            465
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c              833
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c            916
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c              1655
-rw-r--r--  drivers/pci/controller/dwc/pcie-spear13xx.c          266
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194-acpi.c      109
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c          2514
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier-ep.c        453
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier.c           409
-rw-r--r--  drivers/pci/controller/dwc/pcie-visconti.c           329
33 files changed, 21104 insertions, 0 deletions
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
new file mode 100644
index 0000000000..ab96da43e0
--- /dev/null
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -0,0 +1,418 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "DesignWare-based PCIe controllers"
+ depends on PCI
+
+config PCIE_DW
+ bool
+
+config PCIE_DW_HOST
+ bool
+ select PCIE_DW
+
+config PCIE_DW_EP
+ bool
+ select PCIE_DW
+
+config PCIE_AL
+ bool "Amazon Annapurna Labs PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_ECAM
+ help
+ Say Y here to enable support for Amazon's Annapurna Labs PCIe
+ controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
+ core plus Annapurna Labs proprietary hardware wrappers. This is
+ required only for DT-based platforms. ACPI platforms with the
+ Annapurna Labs PCIe controller don't need to enable this.
+
+config PCI_MESON
+ tristate "Amlogic Meson PCIe controller"
+ default m if ARCH_MESON
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCI controller support on Amlogic
+ SoCs. The PCI controller on Amlogic is based on DesignWare hardware
+ and therefore the driver re-uses the DesignWare core functions to
+ implement the driver.
+
+config PCIE_ARTPEC6
+ bool
+
+config PCIE_ARTPEC6_HOST
+ bool "Axis ARTPEC-6 PCIe controller (host mode)"
+ depends on MACH_ARTPEC6 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCIE_ARTPEC6
+ help
+ Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+ host mode. This uses the DesignWare core.
+
+config PCIE_ARTPEC6_EP
+ bool "Axis ARTPEC-6 PCIe controller (endpoint mode)"
+ depends on MACH_ARTPEC6 || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_ARTPEC6
+ help
+ Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+ endpoint mode. This uses the DesignWare core.
+
+config PCIE_BT1
+ tristate "Baikal-T1 PCIe controller"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Enables support for the PCIe controller in the Baikal-T1 SoC to work
+ in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core.
+
+config PCI_IMX6
+ bool
+
+config PCI_IMX6_HOST
+ bool "Freescale i.MX6/7/8 PCIe controller (host mode)"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_IMX6
+ help
+ Enables support for the PCIe controller in the i.MX SoCs to
+ work in Root Complex mode. The PCI controller on i.MX is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_IMX6_EP
+ bool "Freescale i.MX6/7/8 PCIe controller (endpoint mode)"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_IMX6
+ help
+ Enables support for the PCIe controller in the i.MX SoCs to
+ work in endpoint mode. The PCI controller on i.MX is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_LAYERSCAPE
+ bool "Freescale Layerscape PCIe controller (host mode)"
+ depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select MFD_SYSCON
+ help
+ Say Y here if you want to enable PCIe controller support on Layerscape
+ SoCs to work in Host mode.
+ This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
+ determines which PCIe controller works in EP mode and which PCIe
+ controller works in RC mode.
+
+config PCI_LAYERSCAPE_EP
+ bool "Freescale Layerscape PCIe controller (endpoint mode)"
+ depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here if you want to enable PCIe controller support on Layerscape
+ SoCs to work in Endpoint mode.
+ This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
+ determines which PCIe controller works in EP mode and which PCIe
+ controller works in RC mode.
+
+config PCI_HISI
+ depends on OF && (ARM64 || COMPILE_TEST)
+ bool "HiSilicon Hip05 and Hip06 SoCs PCIe controller"
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want PCIe controller support on HiSilicon
+ Hip05 and Hip06 SoCs
+
+config PCIE_KIRIN
+ depends on OF && (ARM64 || COMPILE_TEST)
+ tristate "HiSilicon Kirin PCIe controller"
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select REGMAP_MMIO
+ help
+ Say Y here if you want PCIe controller support
+ on HiSilicon Kirin series SoCs.
+
+config PCIE_HISI_STB
+ bool "HiSilicon STB PCIe controller"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+
+config PCIE_INTEL_GW
+ bool "Intel Gateway PCIe controller "
+ depends on OF && (X86 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say 'Y' here to enable PCIe Host controller support on Intel
+ Gateway SoCs.
+ The PCIe controller uses the DesignWare core plus Intel-specific
+ hardware wrappers.
+
+config PCIE_KEEMBAY
+ bool
+
+config PCIE_KEEMBAY_HOST
+ bool "Intel Keem Bay PCIe controller (host mode)"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in host mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
+config PCIE_KEEMBAY_EP
+ bool "Intel Keem Bay PCIe controller (endpoint mode)"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI_MSI
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in endpoint mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
+config PCIE_ARMADA_8K
+ bool "Marvell Armada-8K PCIe controller"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCIe controller support on
+ Armada-8K SoCs. The PCIe controller on Armada-8K is based on
+ DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCIE_TEGRA194
+ tristate
+
+config PCIE_TEGRA194_HOST
+ tristate "NVIDIA Tegra194 (and later) PCIe controller (host mode)"
+ depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
+ help
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in host mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_TEGRA194_EP
+ tristate "NVIDIA Tegra194 (and later) PCIe controller (endpoint mode)"
+ depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
+ help
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in endpoint mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_DW_PLAT
+ bool
+
+config PCIE_DW_PLAT_HOST
+ bool "Platform bus based DesignWare PCIe controller (host mode)"
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCIE_DW_PLAT
+ help
+ Enables support for the PCIe controller in the Designware IP to
+ work in host mode. There are two instances of PCIe controller in
+ Designware IP.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCIE_DW_PLAT_HOST must be selected and in
+ order to enable device-specific features PCIE_DW_PLAT_EP must be
+ selected.
+
+config PCIE_DW_PLAT_EP
+ bool "Platform bus based DesignWare PCIe controller (endpoint mode)"
+ depends on PCI && PCI_MSI
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_DW_PLAT
+ help
+ Enables support for the PCIe controller in the Designware IP to
+ work in endpoint mode. There are two instances of PCIe controller
+ in Designware IP.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCIE_DW_PLAT_HOST must be selected and in
+ order to enable device-specific features PCIE_DW_PLAT_EP must be
+ selected.
+
+config PCIE_QCOM
+ bool "Qualcomm PCIe controller (host mode)"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select CRC8
+ help
+ Say Y here to enable PCIe controller support on Qualcomm SoCs. The
+ PCIe controller uses the DesignWare core plus Qualcomm-specific
+ hardware wrappers.
+
+config PCIE_QCOM_EP
+ tristate "Qualcomm PCIe controller (endpoint mode)"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
+ to work in endpoint mode. The PCIe controller uses the DesignWare core
+ plus Qualcomm-specific hardware wrappers.
+
+config PCIE_ROCKCHIP_DW_HOST
+ bool "Rockchip DesignWare PCIe controller"
+ select PCIE_DW
+ select PCIE_DW_HOST
+ depends on PCI_MSI
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ help
+ Enables support for the DesignWare PCIe controller in the
+ Rockchip SoC except RK3399.
+
+config PCI_EXYNOS
+ tristate "Samsung Exynos PCIe controller"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Enables support for the PCIe controller in the Samsung Exynos SoCs
+ to work in host mode. The PCI controller is based on the DesignWare
+ hardware and therefore the driver re-uses the DesignWare core
+ functions to implement the driver.
+
+config PCIE_FU740
+ bool "SiFive FU740 PCIe controller"
+ depends on PCI_MSI
+ depends on SOC_SIFIVE || COMPILE_TEST
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support for the SiFive
+ FU740.
+
+config PCIE_UNIPHIER
+ bool "Socionext UniPhier PCIe controller (host mode)"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe host controller support on UniPhier SoCs.
+ This driver supports LD20 and PXs3 SoCs.
+
+config PCIE_UNIPHIER_EP
+ bool "Socionext UniPhier PCIe controller (endpoint mode)"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here if you want PCIe endpoint controller support on
+ UniPhier SoCs. This driver supports Pro5 SoC.
+
+config PCIE_SPEAR13XX
+ bool "STMicroelectronics SPEAr PCIe controller"
+ depends on ARCH_SPEAR13XX || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe support on SPEAr13XX SoCs.
+
+config PCI_DRA7XX
+ tristate
+
+config PCI_DRA7XX_HOST
+ tristate "TI DRA7xx PCIe controller (host mode)"
+ depends on SOC_DRA7XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_DRA7XX
+ default y if SOC_DRA7XX
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ host mode. There are two instances of PCIe controller in DRA7xx.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCI_DRA7XX_HOST must be selected and in order
+ to enable device-specific features PCI_DRA7XX_EP must be selected.
+ This uses the DesignWare core.
+
+config PCI_DRA7XX_EP
+ tristate "TI DRA7xx PCIe controller (endpoint mode)"
+ depends on SOC_DRA7XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_DRA7XX
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ endpoint mode. There are two instances of PCIe controller in DRA7xx.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCI_DRA7XX_HOST must be selected and in order
+ to enable device-specific features PCI_DRA7XX_EP must be selected.
+ This uses the DesignWare core.
+
+config PCI_KEYSTONE
+ bool
+
+config PCI_KEYSTONE_HOST
+ bool "TI Keystone PCIe controller (host mode)"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_KEYSTONE
+ help
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in host mode. The PCI controller on Keystone is based on
+ DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_KEYSTONE_EP
+ bool "TI Keystone PCIe controller (endpoint mode)"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_KEYSTONE
+ help
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in endpoint mode. The PCI controller on Keystone is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCIE_VISCONTI_HOST
+ bool "Toshiba Visconti PCIe controller"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on Toshiba Visconti SoC.
+ This driver supports TMPV7708 SoC.
+
+endmenu
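The user-visible options above pull in the shared DesignWare library symbols
through their select statements, so a kernel configuration only has to enable
the controller-specific option. A minimal sketch of the resulting fragment,
assuming the generic platform-bus host driver is chosen (a real .config will
of course carry many more lines):

    CONFIG_PCI=y
    CONFIG_PCI_MSI=y
    CONFIG_PCIE_DW_PLAT_HOST=y
    # pulled in via select:
    CONFIG_PCIE_DW_PLAT=y
    CONFIG_PCIE_DW_HOST=y
    CONFIG_PCIE_DW=y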
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
new file mode 100644
index 0000000000..bf5c311875
--- /dev/null
+++ b/drivers/pci/controller/dwc/Makefile
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
+obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
+obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
+obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
+obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
+obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
+obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
+obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
+obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
+obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
+obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
+obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
+obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
+
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+obj-$(CONFIG_PCIE_AL) += pcie-al.o
+obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
+
+ifdef CONFIG_ACPI
+ifdef CONFIG_PCI_QUIRKS
+obj-$(CONFIG_ARM64) += pcie-al.o
+obj-$(CONFIG_ARM64) += pcie-hisi.o
+obj-$(CONFIG_ARM64) += pcie-tegra194-acpi.o
+endif
+endif
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
new file mode 100644
index 0000000000..b445ffe95e
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -0,0 +1,960 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* PCIe controller wrapper DRA7XX configuration registers */
+
+#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024
+#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028
+#define ERR_SYS BIT(0)
+#define ERR_FATAL BIT(1)
+#define ERR_NONFATAL BIT(2)
+#define ERR_COR BIT(3)
+#define ERR_AXI BIT(4)
+#define ERR_ECRC BIT(5)
+#define PME_TURN_OFF BIT(8)
+#define PME_TO_ACK BIT(9)
+#define PM_PME BIT(10)
+#define LINK_REQ_RST BIT(11)
+#define LINK_UP_EVT BIT(12)
+#define CFG_BME_EVT BIT(13)
+#define CFG_MSE_EVT BIT(14)
+#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
+ ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
+ LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
+
+#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034
+#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038
+#define INTA BIT(0)
+#define INTB BIT(1)
+#define INTC BIT(2)
+#define INTD BIT(3)
+#define MSI BIT(4)
+#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+
+#define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100
+#define DEVICE_TYPE_EP 0x0
+#define DEVICE_TYPE_LEG_EP 0x1
+#define DEVICE_TYPE_RC 0x4
+
+#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104
+#define LTSSM_EN 0x1
+
+#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C
+#define LINK_UP BIT(16)
+#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
+
+#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
+#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128
+
+#define PCIECTRL_TI_CONF_MSI_XMT 0x012c
+#define MSI_REQ_GRANT BIT(0)
+#define MSI_VECTOR_SHIFT 7
+
+#define PCIE_1LANE_2LANE_SELECTION BIT(13)
+#define PCIE_B1C0_MODE_SEL BIT(2)
+#define PCIE_B0_B1_TSYNCEN BIT(0)
+
+struct dra7xx_pcie {
+ struct dw_pcie *pci;
+ void __iomem *base; /* DT ti_conf */
+ int phy_count; /* DT phy-names count */
+ struct phy **phy;
+ struct irq_domain *irq_domain;
+ struct clk *clk;
+ enum dw_pcie_device_mode mode;
+};
+
+struct dra7xx_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ u32 b1co_mode_sel_mask;
+};
+
+#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
+
+static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
+{
+ return readl(pcie->base + offset);
+}
+
+static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
+ u32 value)
+{
+ writel(value, pcie->base + offset);
+}
+
+static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+{
+ return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+}
+
+static int dra7xx_pcie_link_up(struct dw_pcie *pci)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+
+ return !!(reg & LINK_UP);
+}
+
+static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ u32 reg;
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg &= ~LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+}
+
+static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ struct device *dev = pci->dev;
+ u32 reg;
+
+ if (dw_pcie_link_up(pci)) {
+ dev_err(dev, "link is already up\n");
+ return 0;
+ }
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg |= LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+ return 0;
+}
+
+static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
+ LEG_EP_INTERRUPTS | MSI);
+
+ dra7xx_pcie_writel(dra7xx,
+ PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+ MSI | LEG_EP_INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+ INTERRUPTS);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
+ INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+ dra7xx_pcie_enable_msi_interrupts(dra7xx);
+}
+
+static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+ dra7xx_pcie_enable_interrupts(dra7xx);
+
+ return 0;
+}
+
+static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = dra7xx_pcie_intx_map,
+ .xlate = pci_irqd_intx_xlate,
+};
+
+static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned long val;
+ int pos;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (index * MSI_REG_CTRL_BLOCK_SIZE));
+ if (!val)
+ return 0;
+
+ pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL);
+ while (pos != MAX_MSI_IRQS_PER_CTRL) {
+ generic_handle_domain_irq(pp->irq_domain,
+ (index * MAX_MSI_IRQS_PER_CTRL) + pos);
+ pos++;
+ pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
+ }
+
+ return 1;
+}
+
+static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret, i, count, num_ctrls;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /**
+ * Need to make sure all MSI status bits read 0 before exiting.
+ * Else, new MSI IRQs are not registered by the wrapper. Have an
+ * upperbound for the loop and exit the IRQ in case of IRQ flood
+ * to avoid locking up system in interrupt context.
+ */
+ count = 0;
+ do {
+ ret = 0;
+
+ for (i = 0; i < num_ctrls; i++)
+ ret |= dra7xx_pcie_handle_msi(pp, i);
+ count++;
+ } while (ret && count <= 1000);
+
+ if (count > 1000)
+ dev_warn_ratelimited(pci->dev,
+ "Too many MSI IRQs to handle\n");
+}
+
+static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dra7xx_pcie *dra7xx;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ unsigned long reg;
+ u32 bit;
+
+ chained_irq_enter(chip, desc);
+
+ pp = irq_desc_get_handler_data(desc);
+ pci = to_dw_pcie_from_pp(pp);
+ dra7xx = to_dra7xx_pcie(pci);
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
+
+ switch (reg) {
+ case MSI:
+ dra7xx_pcie_handle_msi_irq(pp);
+ break;
+ case INTA:
+ case INTB:
+ case INTC:
+ case INTD:
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(dra7xx->irq_domain, bit);
+ break;
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
+{
+ struct dra7xx_pcie *dra7xx = arg;
+ struct dw_pcie *pci = dra7xx->pci;
+ struct device *dev = pci->dev;
+ struct dw_pcie_ep *ep = &pci->ep;
+ u32 reg;
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
+
+ if (reg & ERR_SYS)
+ dev_dbg(dev, "System Error\n");
+
+ if (reg & ERR_FATAL)
+ dev_dbg(dev, "Fatal Error\n");
+
+ if (reg & ERR_NONFATAL)
+ dev_dbg(dev, "Non Fatal Error\n");
+
+ if (reg & ERR_COR)
+ dev_dbg(dev, "Correctable Error\n");
+
+ if (reg & ERR_AXI)
+ dev_dbg(dev, "AXI tag lookup fatal Error\n");
+
+ if (reg & ERR_ECRC)
+ dev_dbg(dev, "ECRC Error\n");
+
+ if (reg & PME_TURN_OFF)
+ dev_dbg(dev,
+ "Power Management Event Turn-Off message received\n");
+
+ if (reg & PME_TO_ACK)
+ dev_dbg(dev,
+ "Power Management Turn-Off Ack message received\n");
+
+ if (reg & PM_PME)
+ dev_dbg(dev, "PM Power Management Event message received\n");
+
+ if (reg & LINK_REQ_RST)
+ dev_dbg(dev, "Link Request Reset\n");
+
+ if (reg & LINK_UP_EVT) {
+ if (dra7xx->mode == DW_PCIE_EP_TYPE)
+ dw_pcie_ep_linkup(ep);
+ dev_dbg(dev, "Link-up state change\n");
+ }
+
+ if (reg & CFG_BME_EVT)
+ dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
+
+ if (reg & CFG_MSE_EVT)
+ dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
+
+ return IRQ_HANDLED;
+}
+
+static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
+ pp);
+ dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, pp);
+ of_node_put(pcie_intc_node);
+ if (!dra7xx->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+ .host_init = dra7xx_pcie_host_init,
+};
+
+static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+
+ dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+}
+
+static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
+ mdelay(1);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
+}
+
+static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
+ u8 interrupt_num)
+{
+ u32 reg;
+
+ reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
+ reg |= MSI_REQ_GRANT;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
+}
+
+static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ dra7xx_pcie_raise_legacy_irq(dra7xx);
+ break;
+ case PCI_EPC_IRQ_MSI:
+ dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
+ break;
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features dra7xx_pcie_epc_features = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features*
+dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &dra7xx_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = dra7xx_pcie_ep_init,
+ .raise_irq = dra7xx_pcie_raise_irq,
+ .get_features = dra7xx_pcie_get_features,
+};
+
+static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = dra7xx->pci;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics");
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ pci->dbi_base2 =
+ devm_platform_ioremap_resource_byname(pdev, "ep_dbics2");
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie *pci = dra7xx->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ /* MSI IRQ is muxed */
+ pp->msi_irq[0] = -ENODEV;
+
+ ret = dra7xx_pcie_init_irq_domain(pp);
+ if (ret < 0)
+ return ret;
+
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics");
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ pp->ops = &dra7xx_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
+ .start_link = dra7xx_pcie_establish_link,
+ .stop_link = dra7xx_pcie_stop_link,
+ .link_up = dra7xx_pcie_link_up,
+};
+
+static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
+{
+ int phy_count = dra7xx->phy_count;
+
+ while (phy_count--) {
+ phy_power_off(dra7xx->phy[phy_count]);
+ phy_exit(dra7xx->phy[phy_count]);
+ }
+}
+
+static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
+{
+ int phy_count = dra7xx->phy_count;
+ int ret;
+ int i;
+
+ for (i = 0; i < phy_count; i++) {
+ ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_init(dra7xx->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(dra7xx->phy[i]);
+ if (ret < 0) {
+ phy_exit(dra7xx->phy[i]);
+ goto err_phy;
+ }
+ }
+
+ return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(dra7xx->phy[i]);
+ phy_exit(dra7xx->phy[i]);
+ }
+
+ return ret;
+}
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
+ .b1co_mode_sel_mask = BIT(2),
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
+ .b1co_mode_sel_mask = GENMASK(3, 2),
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
+ .b1co_mode_sel_mask = BIT(2),
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
+ .b1co_mode_sel_mask = GENMASK(3, 2),
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+ {
+ .compatible = "ti,dra7-pcie",
+ .data = &dra7xx_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra7-pcie-ep",
+ .data = &dra7xx_pcie_ep_of_data,
+ },
+ {
+ .compatible = "ti,dra746-pcie-rc",
+ .data = &dra746_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra726-pcie-rc",
+ .data = &dra726_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra746-pcie-ep",
+ .data = &dra746_pcie_ep_of_data,
+ },
+ {
+ .compatible = "ti,dra726-pcie-ep",
+ .data = &dra726_pcie_ep_of_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
+
+/*
+ * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * @dra7xx: the dra7xx device where the workaround should be applied
+ *
+ * Access to the PCIe slave port that are not 32-bit aligned will result
+ * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
+ * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
+ * 0x3.
+ *
+ * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
+ */
+static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
+{
+ int ret;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ struct regmap *regmap;
+
+ regmap = syscon_regmap_lookup_by_phandle(np,
+ "ti,syscon-unaligned-access");
+ if (IS_ERR(regmap)) {
+ dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
+ return -EINVAL;
+ }
+
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
+ 2, 0, &args);
+ if (ret) {
+ dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(regmap, args.args[0], args.args[1],
+ args.args[1]);
+ if (ret)
+ dev_err(dev, "failed to enable unaligned access\n");
+
+ of_node_put(args.np);
+
+ return ret;
+}
+
+static int dra7xx_pcie_configure_two_lane(struct device *dev,
+ u32 b1co_mode_sel_mask)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *pcie_syscon;
+ unsigned int pcie_reg;
+ u32 mask;
+ u32 val;
+
+ pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+ if (IS_ERR(pcie_syscon)) {
+ dev_err(dev, "unable to get ti,syscon-lane-sel\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
+ &pcie_reg)) {
+ dev_err(dev, "couldn't get lane selection reg offset\n");
+ return -EINVAL;
+ }
+
+ mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
+ val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
+ regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_probe(struct platform_device *pdev)
+{
+ u32 reg;
+ int ret;
+ int irq;
+ int i;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
+ void __iomem *base;
+ struct dw_pcie *pci;
+ struct dra7xx_pcie *dra7xx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ char name[10];
+ struct gpio_desc *reset;
+ const struct dra7xx_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+ u32 b1co_mode_sel_mask;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+ b1co_mode_sel_mask = data->b1co_mode_sel_mask;
+
+ dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
+ if (!dra7xx)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ phy_count = of_property_count_strings(np, "phy-names");
+ if (phy_count < 0) {
+ dev_err(dev, "unable to find the strings\n");
+ return phy_count;
+ }
+
+ phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ dra7xx->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(dra7xx->clk))
+ return dev_err_probe(dev, PTR_ERR(dra7xx->clk),
+ "clock request failed");
+
+ ret = clk_prepare_enable(dra7xx->clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < phy_count; i++) {
+ snprintf(name, sizeof(name), "pcie-phy%d", i);
+ phy[i] = devm_phy_get(dev, name);
+ if (IS_ERR(phy[i]))
+ return PTR_ERR(phy[i]);
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
+ }
+
+ dra7xx->base = base;
+ dra7xx->phy = phy;
+ dra7xx->pci = pci;
+ dra7xx->phy_count = phy_count;
+
+ if (phy_count == 2) {
+ ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
+ if (ret < 0)
+ dra7xx->phy_count = 1; /* Fallback to x1 lane mode */
+ }
+
+ ret = dra7xx_pcie_enable_phy(dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, dra7xx);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
+
+ reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
+ if (IS_ERR(reset)) {
+ ret = PTR_ERR(reset);
+ dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
+ goto err_gpio;
+ }
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg &= ~LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
+ ret = -ENODEV;
+ goto err_gpio;
+ }
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+ DEVICE_TYPE_RC);
+
+ ret = dra7xx_pcie_unaligned_memaccess(dev);
+ if (ret)
+ dev_err(dev, "WA for Errata i870 not applied\n");
+
+ ret = dra7xx_add_pcie_port(dra7xx, pdev);
+ if (ret < 0)
+ goto err_gpio;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
+ ret = -ENODEV;
+ goto err_gpio;
+ }
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+ DEVICE_TYPE_EP);
+
+ ret = dra7xx_pcie_unaligned_memaccess(dev);
+ if (ret)
+ goto err_gpio;
+
+ ret = dra7xx_add_pcie_ep(dra7xx, pdev);
+ if (ret < 0)
+ goto err_gpio;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ }
+ dra7xx->mode = mode;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
+ IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_gpio;
+ }
+
+ return 0;
+
+err_gpio:
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ dra7xx_pcie_disable_phy(dra7xx);
+
+err_link:
+ while (--i >= 0)
+ device_link_del(link[i]);
+
+ return ret;
+}
+
+static int dra7xx_pcie_suspend(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ struct dw_pcie *pci = dra7xx->pci;
+ u32 val;
+
+ if (dra7xx->mode != DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* clear MSE */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ val &= ~PCI_COMMAND_MEMORY;
+ dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_resume(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ struct dw_pcie *pci = dra7xx->pci;
+ u32 val;
+
+ if (dra7xx->mode != DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* set MSE */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ val |= PCI_COMMAND_MEMORY;
+ dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_suspend_noirq(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+
+ dra7xx_pcie_disable_phy(dra7xx);
+
+ return 0;
+}
+
+static int dra7xx_pcie_resume_noirq(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dra7xx_pcie_enable_phy(dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dra7xx_pcie_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int ret;
+
+ dra7xx_pcie_stop_link(dra7xx->pci);
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+ dra7xx_pcie_disable_phy(dra7xx);
+
+ clk_disable_unprepare(dra7xx->clk);
+}
+
+static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+ dra7xx_pcie_resume_noirq)
+};
+
+static struct platform_driver dra7xx_pcie_driver = {
+ .probe = dra7xx_pcie_probe,
+ .driver = {
+ .name = "dra7-pcie",
+ .of_match_table = of_dra7xx_pcie_match,
+ .suppress_bind_attrs = true,
+ .pm = &dra7xx_pcie_pm_ops,
+ },
+ .shutdown = dra7xx_pcie_shutdown,
+};
+module_platform_driver(dra7xx_pcie_driver);
+
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_DESCRIPTION("PCIe controller driver for TI DRA7xx SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
new file mode 100644
index 0000000000..c6bede3469
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Samsung Exynos SoCs
+ *
+ * Copyright (C) 2013-2020 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ * Jaehoon Chung <jh80.chung@samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "pcie-designware.h"
+
+#define to_exynos_pcie(x) dev_get_drvdata((x)->dev)
+
+/* PCIe ELBI registers */
+#define PCIE_IRQ_PULSE 0x000
+#define IRQ_INTA_ASSERT BIT(0)
+#define IRQ_INTB_ASSERT BIT(2)
+#define IRQ_INTC_ASSERT BIT(4)
+#define IRQ_INTD_ASSERT BIT(6)
+#define PCIE_IRQ_LEVEL 0x004
+#define PCIE_IRQ_SPECIAL 0x008
+#define PCIE_IRQ_EN_PULSE 0x00c
+#define PCIE_IRQ_EN_LEVEL 0x010
+#define PCIE_IRQ_EN_SPECIAL 0x014
+#define PCIE_SW_WAKE 0x018
+#define PCIE_BUS_EN BIT(1)
+#define PCIE_CORE_RESET 0x01c
+#define PCIE_CORE_RESET_ENABLE BIT(0)
+#define PCIE_STICKY_RESET 0x020
+#define PCIE_NONSTICKY_RESET 0x024
+#define PCIE_APP_INIT_RESET 0x028
+#define PCIE_APP_LTSSM_ENABLE 0x02c
+#define PCIE_ELBI_RDLH_LINKUP 0x074
+#define PCIE_ELBI_XMLH_LINKUP BIT(4)
+#define PCIE_ELBI_LTSSM_ENABLE 0x1
+#define PCIE_ELBI_SLV_AWMISC 0x11c
+#define PCIE_ELBI_SLV_ARMISC 0x120
+#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
+
+struct exynos_pcie {
+ struct dw_pcie pci;
+ void __iomem *elbi_base;
+ struct clk *clk;
+ struct clk *bus_clk;
+ struct phy *phy;
+ struct regulator_bulk_data supplies[2];
+};
+
+static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
+{
+ struct device *dev = ep->pci.dev;
+ int ret;
+
+ ret = clk_prepare_enable(ep->clk);
+ if (ret) {
+ dev_err(dev, "cannot enable pcie rc clock");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ep->bus_clk);
+ if (ret) {
+ dev_err(dev, "cannot enable pcie bus clock");
+ goto err_bus_clk;
+ }
+
+ return 0;
+
+err_bus_clk:
+ clk_disable_unprepare(ep->clk);
+
+ return ret;
+}
+
+static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
+{
+ clk_disable_unprepare(ep->bus_clk);
+ clk_disable_unprepare(ep->clk);
+}
+
+static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
+{
+ writel(val, base + reg);
+}
+
+static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
+{
+ u32 val;
+
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC);
+ if (on)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+}
+
+static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
+{
+ u32 val;
+
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC);
+ if (on)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+}
+
+static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
+{
+ u32 val;
+
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val &= ~PCIE_CORE_RESET_ENABLE;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET);
+}
+
+static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
+{
+ u32 val;
+
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val |= PCIE_CORE_RESET_ENABLE;
+
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_STICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET);
+}
+
+static int exynos_pcie_start_link(struct dw_pcie *pci)
+{
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+ u32 val;
+
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE);
+ val &= ~PCIE_BUS_EN;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE);
+
+ /* assert LTSSM enable */
+ exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+ PCIE_APP_LTSSM_ENABLE);
+ return 0;
+}
+
+static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
+{
+ u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE);
+
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE);
+}
+
+static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
+{
+ struct exynos_pcie *ep = arg;
+
+ exynos_pcie_clear_irq_pulse(ep);
+ return IRQ_HANDLED;
+}
+
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
+{
+ u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+ IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
+
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
+}
+
+static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+ u32 val;
+
+ exynos_pcie_sideband_dbi_r_mode(ep, true);
+ dw_pcie_read(base + reg, size, &val);
+ exynos_pcie_sideband_dbi_r_mode(ep, false);
+ return val;
+}
+
+static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+
+ exynos_pcie_sideband_dbi_w_mode(ep, true);
+ dw_pcie_write(base + reg, size, val);
+ exynos_pcie_sideband_dbi_w_mode(ep, false);
+}
+
+static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int exynos_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops exynos_pci_ops = {
+ .read = exynos_pcie_rd_own_conf,
+ .write = exynos_pcie_wr_own_conf,
+};
+
+static int exynos_pcie_link_up(struct dw_pcie *pci)
+{
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+ u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
+
+ return (val & PCIE_ELBI_XMLH_LINKUP);
+}
+
+static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+
+ pp->bridge->ops = &exynos_pci_ops;
+
+ exynos_pcie_assert_core_reset(ep);
+
+ phy_init(ep->phy);
+ phy_power_on(ep->phy);
+
+ exynos_pcie_deassert_core_reset(ep);
+ exynos_pcie_enable_irq_pulse(ep);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
+ .host_init = exynos_pcie_host_init,
+};
+
+static int exynos_add_pcie_port(struct exynos_pcie *ep,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &ep->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
+ IRQF_SHARED, "exynos-pcie", ep);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ pp->ops = &exynos_pcie_host_ops;
+ pp->msi_irq[0] = -ENODEV;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .read_dbi = exynos_pcie_read_dbi,
+ .write_dbi = exynos_pcie_write_dbi,
+ .link_up = exynos_pcie_link_up,
+ .start_link = exynos_pcie_start_link,
+};
+
+static int exynos_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_pcie *ep;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->pci.dev = dev;
+ ep->pci.ops = &dw_pcie_ops;
+
+ ep->phy = devm_of_phy_get(dev, np, NULL);
+ if (IS_ERR(ep->phy))
+ return PTR_ERR(ep->phy);
+
+ /* External Local Bus interface (ELBI) registers */
+ ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(ep->elbi_base))
+ return PTR_ERR(ep->elbi_base);
+
+ ep->clk = devm_clk_get(dev, "pcie");
+ if (IS_ERR(ep->clk)) {
+ dev_err(dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(ep->clk);
+ }
+
+ ep->bus_clk = devm_clk_get(dev, "pcie_bus");
+ if (IS_ERR(ep->bus_clk)) {
+ dev_err(dev, "Failed to get pcie bus clock\n");
+ return PTR_ERR(ep->bus_clk);
+ }
+
+ ep->supplies[0].supply = "vdd18";
+ ep->supplies[1].supply = "vdd10";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ep->supplies),
+ ep->supplies);
+ if (ret)
+ return ret;
+
+ ret = exynos_pcie_init_clk_resources(ep);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, ep);
+
+ ret = exynos_add_pcie_port(ep, pdev);
+ if (ret < 0)
+ goto fail_probe;
+
+ return 0;
+
+fail_probe:
+ phy_exit(ep->phy);
+ exynos_pcie_deinit_clk_resources(ep);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+
+ return ret;
+}
+
+static int exynos_pcie_remove(struct platform_device *pdev)
+{
+ struct exynos_pcie *ep = platform_get_drvdata(pdev);
+
+ dw_pcie_host_deinit(&ep->pci.pp);
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ exynos_pcie_deinit_clk_resources(ep);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+
+ return 0;
+}
+
+static int exynos_pcie_suspend_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+
+ return 0;
+}
+
+static int exynos_pcie_resume_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &ep->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
+ /* exynos_pcie_host_init controls ep->phy */
+ exynos_pcie_host_init(pp);
+ dw_pcie_setup_rc(pp);
+ exynos_pcie_start_link(pci);
+ return dw_pcie_wait_for_link(pci);
+}
+
+static const struct dev_pm_ops exynos_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+ exynos_pcie_resume_noirq)
+};
+
+static const struct of_device_id exynos_pcie_of_match[] = {
+ { .compatible = "samsung,exynos5433-pcie", },
+ { },
+};
+
+static struct platform_driver exynos_pcie_driver = {
+ .probe = exynos_pcie_probe,
+ .remove = exynos_pcie_remove,
+ .driver = {
+ .name = "exynos-pcie",
+ .of_match_table = exynos_pcie_of_match,
+ .pm = &exynos_pcie_pm_ops,
+ },
+};
+module_platform_driver(exynos_pcie_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
new file mode 100644
index 0000000000..74703362ae
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -0,0 +1,1612 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ * https://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-designware.h"
+
+#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
+#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
+#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
+#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
+
+#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
+
+enum imx6_pcie_variants {
+ IMX6Q,
+ IMX6SX,
+ IMX6QP,
+ IMX7D,
+ IMX8MQ,
+ IMX8MM,
+ IMX8MP,
+ IMX8MQ_EP,
+ IMX8MM_EP,
+ IMX8MP_EP,
+};
+
+#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
+#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
+#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+
+struct imx6_pcie_drvdata {
+ enum imx6_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
+ u32 flags;
+ int dbi_length;
+ const char *gpr;
+};
+
+struct imx6_pcie {
+ struct dw_pcie *pci;
+ int reset_gpio;
+ bool gpio_active_high;
+ bool link_is_up;
+ struct clk *pcie_bus;
+ struct clk *pcie_phy;
+ struct clk *pcie_inbound_axi;
+ struct clk *pcie;
+ struct clk *pcie_aux;
+ struct regmap *iomuxc_gpr;
+ u16 msi_ctrl;
+ u32 controller_id;
+ struct reset_control *pciephy_reset;
+ struct reset_control *apps_reset;
+ struct reset_control *turnoff_reset;
+ u32 tx_deemph_gen1;
+ u32 tx_deemph_gen2_3p5db;
+ u32 tx_deemph_gen2_6db;
+ u32 tx_swing_full;
+ u32 tx_swing_low;
+ struct regulator *vpcie;
+ struct regulator *vph;
+ void __iomem *phy_base;
+
+ /* power domain for pcie */
+ struct device *pd_pcie;
+ /* power domain for pcie phy */
+ struct device *pd_pcie_phy;
+ struct phy *phy;
+ const struct imx6_pcie_drvdata *drvdata;
+};
+
+/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
+#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
+#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define PCIE_PHY_CTRL_CAP_ADR BIT(16)
+#define PCIE_PHY_CTRL_CAP_DAT BIT(17)
+#define PCIE_PHY_CTRL_WR BIT(18)
+#define PCIE_PHY_CTRL_RD BIT(19)
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK BIT(16)
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_ATEOVRD 0x10
+#define PCIE_PHY_ATEOVRD_EN BIT(2)
+#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
+#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
+
+#define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
+#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
+#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
+#define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9)
+
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
+
+/* iMX7 PCIe PHY registers */
+#define PCIE_PHY_CMN_REG4 0x14
+/* These are probably the bits that *aren't* DCC_FB_EN */
+#define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29
+
+#define PCIE_PHY_CMN_REG15 0x54
+#define PCIE_PHY_CMN_REG15_DLY_4 BIT(2)
+#define PCIE_PHY_CMN_REG15_PLL_PD BIT(5)
+#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7)
+
+#define PCIE_PHY_CMN_REG24 0x90
+#define PCIE_PHY_CMN_REG24_RX_EQ BIT(6)
+#define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3)
+
+#define PCIE_PHY_CMN_REG26 0x98
+#define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
+
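+/*
+ * On i.MX8M there are two PCIe controllers: controller 1 uses IOMUXC_GPR16
+ * for its PHY-related GPR bits, controller 0 uses IOMUXC_GPR14.
+ */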
+static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+{
+ WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
+ imx6_pcie->drvdata->variant != IMX8MQ_EP &&
+ imx6_pcie->drvdata->variant != IMX8MM &&
+ imx6_pcie->drvdata->variant != IMX8MM_EP &&
+ imx6_pcie->drvdata->variant != IMX8MP &&
+ imx6_pcie->drvdata->variant != IMX8MP_EP);
+ return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
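+/* Program Root Complex or Endpoint mode into the GPR12 device-type field */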
+static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+{
+ unsigned int mask, val, mode;
+
+ if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+ mode = PCI_EXP_TYPE_ENDPOINT;
+ else
+ mode = PCI_EXP_TYPE_ROOT_PORT;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ if (imx6_pcie->controller_id == 1) {
+ mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+ val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ mode);
+ } else {
+ mask = IMX6Q_GPR12_DEVICE_TYPE;
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
+ }
+ break;
+ default:
+ mask = IMX6Q_GPR12_DEVICE_TYPE;
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
+ break;
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+}
+
+static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ bool val;
+ u32 max_iterations = 10;
+ u32 wait_counter = 0;
+
+ do {
+ val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
+ PCIE_PHY_STAT_ACK;
+ wait_counter++;
+
+ if (val == exp_val)
+ return 0;
+
+ udelay(1);
+ } while (wait_counter < max_iterations);
+
+ return -ETIMEDOUT;
+}
+
+static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ u32 val;
+ int ret;
+
+ val = PCIE_PHY_CTRL_DATA(addr);
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+ val |= PCIE_PHY_CTRL_CAP_ADR;
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
+ if (ret)
+ return ret;
+
+ val = PCIE_PHY_CTRL_DATA(addr);
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+ return pcie_phy_poll_ack(imx6_pcie, false);
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ u32 phy_ctl;
+ int ret;
+
+ ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ if (ret)
+ return ret;
+
+ /* assert Read signal */
+ phy_ctl = PCIE_PHY_CTRL_RD;
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
+
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
+ if (ret)
+ return ret;
+
+ *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
+
+ /* deassert Read signal */
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
+
+ return pcie_phy_poll_ack(imx6_pcie, false);
+}
+
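+/*
+ * Write one of the 16-bit PCIe PHY control registers (not memory-mapped),
+ * using the same request/ack handshake as the read path above.
+ */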
+static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ u32 var;
+ int ret;
+
+	/* write the address, then latch it (capture address) */
+ ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ if (ret)
+ return ret;
+
+ var = PCIE_PHY_CTRL_DATA(data);
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+ /* capture data */
+ var |= PCIE_PHY_CTRL_CAP_DAT;
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
+ if (ret)
+ return ret;
+
+ /* deassert cap data */
+ var = PCIE_PHY_CTRL_DATA(data);
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(imx6_pcie, false);
+ if (ret)
+ return ret;
+
+ /* assert wr signal */
+ var = PCIE_PHY_CTRL_WR;
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+ /* wait for ack */
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
+ if (ret)
+ return ret;
+
+ /* deassert wr signal */
+ var = PCIE_PHY_CTRL_DATA(data);
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(imx6_pcie, false);
+ if (ret)
+ return ret;
+
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);
+
+ return 0;
+}
+
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ /*
+		 * The PHY initialization is handled by the standalone
+		 * PHY driver on these SoCs, so there is nothing to do here.
+ */
+ break;
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ /*
+		 * TODO: Currently this code assumes an external
+		 * oscillator is being used.
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+		 * According to the datasheet, PCIE_VPH is expected to
+		 * be 1.8V. If PCIE_VPH is supplied from 3.3V instead,
+		 * VREG_BYPASS must be cleared to zero.
+ */
+ if (imx6_pcie->vph &&
+ regulator_get_voltage(imx6_pcie->vph) > 3000000)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+ break;
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+ IMX6SX_GPR12_PCIE_RX_EQ_2);
+ fallthrough;
+ default:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
+ break;
+ }
+
+ imx6_pcie_configure_type(imx6_pcie);
+}
+
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+ u32 val;
+ struct device *dev = imx6_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ IOMUXC_GPR22, val,
+ val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT))
+ dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
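+/*
+ * Reconfigure the PHY MPLL multiplier and reference clock divider when the
+ * PHY reference clock is not the default 125MHz.
+ */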
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+ unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ int mult, div;
+ u16 val;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ return 0;
+
+ switch (phy_rate) {
+ case 125000000:
+ /*
+ * The default settings of the MPLL are for a 125MHz input
+ * clock, so no need to reconfigure anything in that case.
+ */
+ return 0;
+ case 100000000:
+ mult = 25;
+ div = 0;
+ break;
+ case 200000000:
+ mult = 25;
+ div = 1;
+ break;
+ default:
+ dev_err(imx6_pcie->pci->dev,
+ "Unsupported PHY reference clock rate %lu\n", phy_rate);
+ return -EINVAL;
+ }
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+ PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+ val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+ val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+ PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+ val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+ val |= PCIE_PHY_ATEOVRD_EN;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+ return 0;
+}
+
+static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+{
+ u16 tmp;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ return;
+
+ pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+
+ usleep_range(2000, 3000);
+
+ pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+}
+
+#ifdef CONFIG_ARM
+/* Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ unsigned long pc = instruction_pointer(regs);
+ unsigned long instr = *(unsigned long *)pc;
+ int reg = (instr >> 12) & 15;
+
+ /*
+ * If the instruction being executed was a read,
+ * make it look like it read all-ones.
+ */
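+	/* Word/byte load (LDR/LDRB) encodings */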
+ if ((instr & 0x0c100000) == 0x04100000) {
+ unsigned long val;
+
+ if (instr & 0x00400000)
+ val = 255;
+ else
+ val = -1;
+
+ regs->uregs[reg] = val;
+ regs->ARM_pc += 4;
+ return 0;
+ }
+
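+	/* Halfword/signed-byte load (LDRH/LDRSB/LDRSH) encodings */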
+ if ((instr & 0x0e100090) == 0x00100090) {
+ regs->uregs[reg] = -1;
+ regs->ARM_pc += 4;
+ return 0;
+ }
+
+ return 1;
+}
+#endif
+
+static int imx6_pcie_attach_pd(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct device_link *link;
+
+ /* Do nothing when in a single power domain */
+ if (dev->pm_domain)
+ return 0;
+
+ imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx6_pcie->pd_pcie))
+ return PTR_ERR(imx6_pcie->pd_pcie);
+	/* Do nothing when the power domain is missing */
+ if (!imx6_pcie->pd_pcie)
+ return 0;
+ link = device_link_add(dev, imx6_pcie->pd_pcie,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ return -EINVAL;
+ }
+
+ imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pd_pcie_phy))
+ return PTR_ERR(imx6_pcie->pd_pcie_phy);
+
+ link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
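+/* Enable the variant-specific reference clock path (GPR bits and/or extra clocks) */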
+static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+ unsigned int offset;
+ int ret = 0;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_axi clock\n");
+ break;
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
+ break;
+ case IMX6QP:
+ case IMX6Q:
+ /* power up core phy and enable ref clock */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ /*
+		 * The async reset input needs the ref clock to sync
+		 * internally. When the ref clock only comes up after reset,
+		 * the internally synced reset time is too short to meet the
+		 * requirement, so add a ~10us delay here.
+ */
+ usleep_range(10, 100);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+ break;
+ case IMX7D:
+ break;
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ ret = clk_prepare_enable(imx6_pcie->pcie_aux);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ break;
+ }
+
+ offset = imx6_pcie_grp_offset(imx6_pcie);
+ /*
+		 * Set the override low and enable it to make sure
+		 * that REF_CLK is turned on.
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
+ break;
+ }
+
+ return ret;
+}
+
+static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
+{
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ break;
+ case IMX6QP:
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD,
+ IMX6Q_GPR1_PCIE_TEST_PD);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ break;
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ clk_disable_unprepare(imx6_pcie->pcie_aux);
+ break;
+ default:
+ break;
+ }
+}
+
+static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_phy);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_phy clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_bus);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_bus clock\n");
+ goto err_pcie_bus;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie clock\n");
+ goto err_pcie;
+ }
+
+ ret = imx6_pcie_enable_ref_clk(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie ref clock\n");
+ goto err_ref_clk;
+ }
+
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+ return 0;
+
+err_ref_clk:
+ clk_disable_unprepare(imx6_pcie->pcie);
+err_pcie:
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+
+ return ret;
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+ imx6_pcie_disable_ref_clk(imx6_pcie);
+ clk_disable_unprepare(imx6_pcie->pcie);
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+}
+
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX7D:
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ fallthrough;
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST,
+ IMX6Q_GPR1_PCIE_SW_RST);
+ break;
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+ break;
+ }
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio))
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
+}
+
+static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+ break;
+ case IMX7D:
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+
+ /* Workaround for ERR010728, failure of PCI-e PLL VCO to
+ * oscillate, especially when cold. This turns off "Duty-cycle
+ * Corrector" and other mysterious undocumented things.
+ */
+ if (likely(imx6_pcie->phy_base)) {
+ /* De-assert DCC_FB_EN */
+ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
+ /* Assert RX_EQS and RX_EQS_SEL */
+ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
+ | PCIE_PHY_CMN_REG24_RX_EQ,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
+ /* Assert ATT_MODE */
+ writel(PCIE_PHY_CMN_REG26_ATT_MODE,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
+ } else {
+ dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
+ }
+
+ imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
+ break;
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST, 0);
+
+ usleep_range(200, 500);
+ break;
+ case IMX6Q: /* Nothing to do */
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ break;
+ }
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ msleep(100);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ !imx6_pcie->gpio_active_high);
+ /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ msleep(100);
+ }
+
+ return 0;
+}
+
+static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 tmp;
+ unsigned int retries;
+
+ for (retries = 0; retries < 200; retries++) {
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ /* Test if the speed change finished. */
+ if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
+ return 0;
+ usleep_range(100, 1000);
+ }
+
+ dev_err(dev, "Speed change timeout\n");
+ return -ETIMEDOUT;
+}
+
+static void imx6_pcie_ltssm_enable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6Q:
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2,
+ IMX6Q_GPR12_PCIE_CTL_2);
+ break;
+ case IMX7D:
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ reset_control_deassert(imx6_pcie->apps_reset);
+ break;
+ }
+}
+
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6Q:
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0);
+ break;
+ case IMX7D:
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ }
+}
+
+static int imx6_pcie_start_link(struct dw_pcie *pci)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct device *dev = pci->dev;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 tmp;
+ int ret;
+
+ /*
+ * Force Gen1 operation when starting the link. In case the link is
+ * started in Gen2 mode, there is a possibility the devices on the
+ * bus will not be detected at all. This happens with PCIe switches.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ /* Start LTSSM. */
+ imx6_pcie_ltssm_enable(dev);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
+
+ if (pci->link_gen > 1) {
+ /* Allow faster modes after the link is up */
+ dw_pcie_dbi_ro_wr_en(pci);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= pci->link_gen;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+
+ /*
+ * Start Directed Speed Change so the best possible
+ * speed both link partners support can be negotiated.
+ */
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ if (imx6_pcie->drvdata->flags &
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
+ /*
+ * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
+ * from i.MX6 family when no link speed transition
+			 * occurs, i.e. when the link comes up at Gen1 and
+			 * stays at Gen1. In that case the bit is not cleared
+			 * by hardware, which would make the wait below report
+			 * a false failure.
+ */
+
+ ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
+ goto err_reset_phy;
+ }
+ }
+
+ /* Make sure link training is finished as well! */
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
+ } else {
+ dev_info(dev, "Link: Only Gen1 is enabled\n");
+ }
+
+ imx6_pcie->link_is_up = true;
+ tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+ dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
+ return 0;
+
+err_reset_phy:
+ imx6_pcie->link_is_up = false;
+ dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
+ imx6_pcie_reset_phy(imx6_pcie);
+ return 0;
+}
+
+static void imx6_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+
+ /* Turn off PCIe LTSSM */
+ imx6_pcie_ltssm_disable(dev);
+}
+
+static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ int ret;
+
+ if (imx6_pcie->vpcie) {
+ ret = regulator_enable(imx6_pcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie regulator: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ imx6_pcie_assert_core_reset(imx6_pcie);
+ imx6_pcie_init_phy(imx6_pcie);
+
+ ret = imx6_pcie_clk_enable(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
+ goto err_reg_disable;
+ }
+
+ if (imx6_pcie->phy) {
+ ret = phy_init(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "pcie PHY power up failed\n");
+ goto err_clk_disable;
+ }
+ }
+
+ if (imx6_pcie->phy) {
+ ret = phy_power_on(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "waiting for PHY ready timeout!\n");
+ goto err_phy_off;
+ }
+ }
+
+ ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ if (ret < 0) {
+ dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+ goto err_phy_off;
+ }
+
+ imx6_setup_phy_mpll(imx6_pcie);
+
+ return 0;
+
+err_phy_off:
+ if (imx6_pcie->phy)
+ phy_exit(imx6_pcie->phy);
+err_clk_disable:
+ imx6_pcie_clk_disable(imx6_pcie);
+err_reg_disable:
+ if (imx6_pcie->vpcie)
+ regulator_disable(imx6_pcie->vpcie);
+ return ret;
+}
+
+static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ if (imx6_pcie->phy) {
+ if (phy_power_off(imx6_pcie->phy))
+ dev_err(pci->dev, "unable to power off PHY\n");
+ phy_exit(imx6_pcie->phy);
+ }
+ imx6_pcie_clk_disable(imx6_pcie);
+
+ if (imx6_pcie->vpcie)
+ regulator_disable(imx6_pcie->vpcie);
+}
+
+static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
+ .host_init = imx6_pcie_host_init,
+ .host_deinit = imx6_pcie_host_exit,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = imx6_pcie_start_link,
+ .stop_link = imx6_pcie_stop_link,
+};
+
+static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ enum pci_barno bar;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features imx8m_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_1 | 1 << BAR_3,
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features*
+imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &imx8m_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = imx6_pcie_ep_init,
+ .raise_irq = imx6_pcie_ep_raise_irq,
+ .get_features = imx6_pcie_ep_get_features,
+};
+
+static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
+ struct platform_device *pdev)
+{
+ int ret;
+ unsigned int pcie_dbi2_offset;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+
+ imx6_pcie_host_init(pp);
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
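+	/* The DBI2 (shadow) register space offset differs between EP variants */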
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ_EP:
+ case IMX8MM_EP:
+ case IMX8MP_EP:
+ pcie_dbi2_offset = SZ_1M;
+ break;
+ default:
+ pcie_dbi2_offset = SZ_4K;
+ break;
+ }
+ pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+ ep->page_size = SZ_64K;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+ /* Start LTSSM. */
+ imx6_pcie_ltssm_enable(dev);
+
+ return 0;
+}
+
+static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+{
+ struct device *dev = imx6_pcie->pci->dev;
+
+ /* Some variants have a turnoff reset in DT */
+ if (imx6_pcie->turnoff_reset) {
+ reset_control_assert(imx6_pcie->turnoff_reset);
+ reset_control_deassert(imx6_pcie->turnoff_reset);
+ goto pm_turnoff_sleep;
+ }
+
+ /* Others poke directly at IOMUXC registers */
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
+ break;
+ default:
+ dev_err(dev, "PME_Turn_Off not implemented\n");
+ return;
+ }
+
+ /*
+ * Components with an upstream port must respond to
+ * PME_Turn_Off with PME_TO_Ack but we can't check.
+ *
+ * The standard recommends a 1-10ms timeout after which to
+ * proceed anyway as if acks were received.
+ */
+pm_turnoff_sleep:
+ usleep_range(1000, 10000);
+}
+
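+/* Save or restore the MSI capability control word across suspend/resume */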
+static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save)
+{
+ u8 offset;
+ u16 val;
+ struct dw_pcie *pci = imx6_pcie->pci;
+
+ if (pci_msi_enabled()) {
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ if (save) {
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
+ imx6_pcie->msi_ctrl = val;
+ } else {
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = imx6_pcie->msi_ctrl;
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+ }
+}
+
+static int imx6_pcie_suspend_noirq(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ return 0;
+
+ imx6_pcie_msi_save_restore(imx6_pcie, true);
+ imx6_pcie_pm_turnoff(imx6_pcie);
+ imx6_pcie_stop_link(imx6_pcie->pci);
+ imx6_pcie_host_exit(pp);
+
+ return 0;
+}
+
+static int imx6_pcie_resume_noirq(struct device *dev)
+{
+ int ret;
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ return 0;
+
+ ret = imx6_pcie_host_init(pp);
+ if (ret)
+ return ret;
+ imx6_pcie_msi_save_restore(imx6_pcie, false);
+ dw_pcie_setup_rc(pp);
+
+ if (imx6_pcie->link_is_up)
+ imx6_pcie_start_link(imx6_pcie->pci);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx6_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+ imx6_pcie_resume_noirq)
+};
+
+static int imx6_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct imx6_pcie *imx6_pcie;
+ struct device_node *np;
+ struct resource *dbi_base;
+ struct device_node *node = dev->of_node;
+ int ret;
+ u16 val;
+
+ imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
+ if (!imx6_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &imx6_pcie_host_ops;
+
+ imx6_pcie->pci = pci;
+ imx6_pcie->drvdata = of_device_get_match_data(dev);
+
+ /* Find the PHY if one is defined, only imx7d uses it */
+ np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
+ if (np) {
+ struct resource res;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(dev, "Unable to map PCIe PHY\n");
+ return ret;
+ }
+ imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx6_pcie->phy_base))
+ return PTR_ERR(imx6_pcie->phy_base);
+ }
+
+ pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ /* Fetch GPIOs */
+ imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
+ imx6_pcie->gpio_active_high = of_property_read_bool(node,
+ "reset-gpio-active-high");
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high ?
+ GPIOF_OUT_INIT_HIGH :
+ GPIOF_OUT_INIT_LOW,
+ "PCIe reset");
+ if (ret) {
+ dev_err(dev, "unable to get reset gpio\n");
+ return ret;
+ }
+ } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
+ return imx6_pcie->reset_gpio;
+ }
+
+ /* Fetch clocks */
+ imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
+ if (IS_ERR(imx6_pcie->pcie_bus))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
+ "pcie_bus clock source missing or invalid\n");
+
+ imx6_pcie->pcie = devm_clk_get(dev, "pcie");
+ if (IS_ERR(imx6_pcie->pcie))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
+ "pcie clock source missing or invalid\n");
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
+ "pcie_inbound_axi");
+ if (IS_ERR(imx6_pcie->pcie_inbound_axi))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
+ "pcie_inbound_axi clock missing or invalid\n");
+ break;
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(imx6_pcie->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+ fallthrough;
+ case IMX7D:
+ if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
+ imx6_pcie->controller_id = 1;
+
+ imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
+ "pciephy");
+ if (IS_ERR(imx6_pcie->pciephy_reset)) {
+ dev_err(dev, "Failed to get PCIEPHY reset control\n");
+ return PTR_ERR(imx6_pcie->pciephy_reset);
+ }
+
+ imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
+ "apps");
+ if (IS_ERR(imx6_pcie->apps_reset)) {
+ dev_err(dev, "Failed to get PCIE APPS reset control\n");
+ return PTR_ERR(imx6_pcie->apps_reset);
+ }
+ break;
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(imx6_pcie->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+ imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
+ "apps");
+ if (IS_ERR(imx6_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
+ "failed to get pcie apps reset control\n");
+
+ imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx6_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ "failed to get pcie phy\n");
+
+ break;
+ default:
+ break;
+ }
+	/* Don't fetch the pcie_phy clock if a dedicated PHY driver is used */
+ if (imx6_pcie->phy == NULL) {
+ imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pcie_phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
+ "pcie_phy clock source missing or invalid\n");
+ }
+
+ /* Grab turnoff reset */
+ imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
+ if (IS_ERR(imx6_pcie->turnoff_reset)) {
+ dev_err(dev, "Failed to get TURNOFF reset control\n");
+ return PTR_ERR(imx6_pcie->turnoff_reset);
+ }
+
+ /* Grab GPR config register range */
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(dev, "unable to find iomuxc registers\n");
+ return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ }
+
+ /* Grab PCIe PHY Tx Settings */
+ if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
+ &imx6_pcie->tx_deemph_gen1))
+ imx6_pcie->tx_deemph_gen1 = 0;
+
+ if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
+ &imx6_pcie->tx_deemph_gen2_3p5db))
+ imx6_pcie->tx_deemph_gen2_3p5db = 0;
+
+ if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
+ &imx6_pcie->tx_deemph_gen2_6db))
+ imx6_pcie->tx_deemph_gen2_6db = 20;
+
+ if (of_property_read_u32(node, "fsl,tx-swing-full",
+ &imx6_pcie->tx_swing_full))
+ imx6_pcie->tx_swing_full = 127;
+
+ if (of_property_read_u32(node, "fsl,tx-swing-low",
+ &imx6_pcie->tx_swing_low))
+ imx6_pcie->tx_swing_low = 127;
+
+ /* Limit link speed */
+ pci->link_gen = 1;
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
+
+ imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx6_pcie->vpcie)) {
+ if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
+ return PTR_ERR(imx6_pcie->vpcie);
+ imx6_pcie->vpcie = NULL;
+ }
+
+ imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx6_pcie->vph)) {
+ if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx6_pcie->vph);
+ imx6_pcie->vph = NULL;
+ }
+
+ platform_set_drvdata(pdev, imx6_pcie);
+
+ ret = imx6_pcie_attach_pd(dev);
+ if (ret)
+ return ret;
+
+ if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
+ ret = imx6_add_pcie_ep(imx6_pcie, pdev);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0)
+ return ret;
+
+ if (pci_msi_enabled()) {
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ }
+ }
+
+ return 0;
+}
+
+static void imx6_pcie_shutdown(struct platform_device *pdev)
+{
+ struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+
+	/* Bring down the link so the bootloader gets a clean state on reboot */
+ imx6_pcie_assert_core_reset(imx6_pcie);
+}
+
+static const struct imx6_pcie_drvdata drvdata[] = {
+ [IMX6Q] = {
+ .variant = IMX6Q,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY |
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ .dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
+ },
+ [IMX6SX] = {
+ .variant = IMX6SX,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY |
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
+ },
+ [IMX6QP] = {
+ .variant = IMX6QP,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY |
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
+ },
+ [IMX7D] = {
+ .variant = IMX7D,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx7d-iomuxc-gpr",
+ },
+ [IMX8MQ] = {
+ .variant = IMX8MQ,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
+ },
+ [IMX8MM] = {
+ .variant = IMX8MM,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ },
+ [IMX8MP] = {
+ .variant = IMX8MP,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
+ },
+ [IMX8MQ_EP] = {
+ .variant = IMX8MQ_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
+ },
+ [IMX8MM_EP] = {
+ .variant = IMX8MM_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ },
+ [IMX8MP_EP] = {
+ .variant = IMX8MP_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
+ },
+};
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+ { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
+ { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
+ { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
+ { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
+ { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
+ { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
+ { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
+ { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
+ { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ {},
+};
+
+static struct platform_driver imx6_pcie_driver = {
+ .driver = {
+ .name = "imx6q-pcie",
+ .of_match_table = imx6_pcie_of_match,
+ .suppress_bind_attrs = true,
+ .pm = &imx6_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = imx6_pcie_probe,
+ .shutdown = imx6_pcie_shutdown,
+};
+
+static void imx6_pcie_quirk(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ struct dw_pcie_rp *pp = bus->sysdata;
+
+ /* Bus parent is the PCI bridge, its parent is this platform driver */
+ if (!bus->dev.parent || !bus->dev.parent->parent)
+ return;
+
+ /* Make sure we only quirk devices associated with this driver */
+ if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
+ return;
+
+ if (pci_is_root_bus(bus)) {
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ /*
+ * Limit config length to avoid the kernel reading beyond
+ * the register set and causing an abort on i.MX 6Quad
+ */
+ if (imx6_pcie->drvdata->dbi_length) {
+ dev->cfg_size = imx6_pcie->drvdata->dbi_length;
+ dev_info(&dev->dev, "Limiting cfg_size to %d\n",
+ dev->cfg_size);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
+ PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+
+static int __init imx6_pcie_init(void)
+{
+#ifdef CONFIG_ARM
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, imx6_pcie_of_match);
+ if (!np)
+ return -ENODEV;
+ of_node_put(np);
+
+ /*
+ * Since probe() can be deferred we need to make sure that
+ * hook_fault_code is not called after __init memory is freed
+	 * by the kernel. Since imx6q_pcie_abort_handler() does not touch
+	 * any driver state, we can install the handler here without
+	 * risking it accessing uninitialized data.
+ */
+ hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
+ "external abort on non-linefetch");
+#endif
+
+ return platform_driver_register(&imx6_pcie_driver);
+}
+device_initcall(imx6_pcie_init);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
new file mode 100644
index 0000000000..cf38365613
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -0,0 +1,1338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Texas Instruments Keystone SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ * https://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ * Implementation based on pci-exynos.c and pcie-designware.c
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define PCIE_VENDORID_MASK 0xffff
+#define PCIE_DEVICEID_SHIFT 16
+
+/* Application registers */
+#define CMD_STATUS 0x004
+#define LTSSM_EN_VAL BIT(0)
+#define OB_XLAT_EN_VAL BIT(1)
+#define DBI_CS2 BIT(5)
+
+#define CFG_SETUP 0x008
+#define CFG_BUS(x) (((x) & 0xff) << 16)
+#define CFG_DEVICE(x) (((x) & 0x1f) << 8)
+#define CFG_FUNC(x) ((x) & 0x7)
+#define CFG_TYPE1 BIT(24)
+
+#define OB_SIZE 0x030
+#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
+#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
+#define OB_ENABLEN BIT(0)
+#define OB_WIN_SIZE 8 /* 8MB */
+
+#define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1)))
+#define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1)))
+#define PCIE_EP_IRQ_SET 0x64
+#define PCIE_EP_IRQ_CLR 0x68
+#define INT_ENABLE BIT(0)
+
+/* IRQ register defines */
+#define IRQ_EOI 0x050
+
+#define MSI_IRQ 0x054
+#define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4))
+#define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4))
+#define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4))
+#define MSI_IRQ_OFFSET 4
+
+#define IRQ_STATUS(n) (0x184 + ((n) << 4))
+#define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4))
+#define INTx_EN BIT(0)
+
+#define ERR_IRQ_STATUS 0x1c4
+#define ERR_IRQ_ENABLE_SET 0x1c8
+#define ERR_AER BIT(5) /* ECRC error */
+#define AM6_ERR_AER BIT(4) /* AM6 ECRC error */
+#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
+#define ERR_CORR BIT(3) /* Correctable error */
+#define ERR_NONFATAL BIT(2) /* Non-fatal error */
+#define ERR_FATAL BIT(1) /* Fatal error */
+#define ERR_SYS BIT(0) /* System error */
+#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
+ ERR_NONFATAL | ERR_FATAL | ERR_SYS)
+
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK 0xb008
+#define PCIE_RC_K2E 0xb009
+#define PCIE_RC_K2L 0xb00a
+#define PCIE_RC_K2G 0xb00b
+
+#define KS_PCIE_DEV_TYPE_MASK (0x3 << 1)
+#define KS_PCIE_DEV_TYPE(mode) ((mode) << 1)
+
+#define EP 0x0
+#define LEG_EP 0x1
+#define RC 0x2
+
+#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
+
+#define AM654_PCIE_DEV_TYPE_MASK 0x3
+#define AM654_WIN_SIZE SZ_64K
+
+#define APP_ADDR_SPACE_0 (16 * SZ_1K)
+
+#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+
+struct ks_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+ u32 version;
+};
+
+struct keystone_pcie {
+ struct dw_pcie *pci;
+ /* PCI Device ID */
+ u32 device_id;
+ int legacy_host_irqs[PCI_NUM_INTX];
+ struct device_node *legacy_intc_np;
+
+ int msi_host_irq;
+ int num_lanes;
+ u32 num_viewport;
+ struct phy **phy;
+ struct device_link **link;
+ struct device_node *msi_intc_np;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np;
+
+ /* Application register space */
+ void __iomem *va_app_base; /* DT 1st resource */
+ struct resource app;
+ bool is_am6;
+};
+
+static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+ return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
+ u32 val)
+{
+ writel(val, ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_msi_irq_ack(struct irq_data *data)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
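+	/* MSI vector n is reported in status register (n % 8), bit (n / 8) */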
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
+ BIT(bit_pos));
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+}
+
+static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ struct dw_pcie *pci;
+ u64 msi_target;
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ msi_target = ks_pcie->app.start + MSI_IRQ;
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
+ msg->data = data->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void ks_pcie_msi_mask(struct irq_data *data)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
+ BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void ks_pcie_msi_unmask(struct irq_data *data)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
+ BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static struct irq_chip ks_pcie_msi_irq_chip = {
+ .name = "KEYSTONE-PCI-MSI",
+ .irq_ack = ks_pcie_msi_irq_ack,
+ .irq_compose_msi_msg = ks_pcie_compose_msi_msg,
+ .irq_set_affinity = ks_pcie_msi_set_affinity,
+ .irq_mask = ks_pcie_msi_mask,
+ .irq_unmask = ks_pcie_msi_unmask,
+};
+
+static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
+ return dw_pcie_allocate_domains(pp);
+}
+
+static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
+ int offset)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 pending;
+
+ pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
+
+ if (BIT(0) & pending) {
+ dev_dbg(dev, ": irq: irq_offset %d", offset);
+ generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
+ }
+
+ /* EOI the INTx interrupt */
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
+}
+
+static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
+{
+ ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
+}
+
+static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
+{
+ u32 reg;
+ struct device *dev = ks_pcie->pci->dev;
+
+ reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
+ if (!reg)
+ return IRQ_NONE;
+
+ if (reg & ERR_SYS)
+ dev_err(dev, "System Error\n");
+
+ if (reg & ERR_FATAL)
+ dev_err(dev, "Fatal Error\n");
+
+ if (reg & ERR_NONFATAL)
+ dev_dbg(dev, "Non Fatal Error\n");
+
+ if (reg & ERR_CORR)
+ dev_dbg(dev, "Correctable Error\n");
+
+ if (!ks_pcie->is_am6 && (reg & ERR_AXI))
+ dev_err(dev, "AXI tag lookup fatal Error\n");
+
+ if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
+ dev_err(dev, "ECRC Error\n");
+
+ ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
+
+ return IRQ_HANDLED;
+}
+
+static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_pcie_legacy_irq_chip = {
+ .name = "Keystone-PCI-Legacy-IRQ",
+ .irq_ack = ks_pcie_ack_legacy_irq,
+ .irq_mask = ks_pcie_mask_legacy_irq,
+ .irq_unmask = ks_pcie_unmask_legacy_irq,
+};
+
+static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+ .map = ks_pcie_init_legacy_irq_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
+
+static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+ u32 num_viewport = ks_pcie->num_viewport;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ u64 start, end;
+ struct resource *mem;
+ int i;
+
+ mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ start = mem->start;
+ end = mem->end;
+
+ /* Disable BARs for inbound access */
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ if (ks_pcie->is_am6)
+ return;
+
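+	/* Each outbound window is OB_WIN_SIZE (8) MB; OB_SIZE takes its log2 */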
+ val = ilog2(OB_WIN_SIZE);
+ ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
+
+ /* Using Direct 1:1 mapping of RC <-> PCI memory space */
+ for (i = 0; i < num_viewport && (start < end); i++) {
+ ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
+ lower_32_bits(start) | OB_ENABLEN);
+ ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
+ upper_32_bits(start));
+ start += OB_WIN_SIZE * SZ_1M;
+ }
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= OB_XLAT_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
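+/*
+ * Config accesses to devices below the root bus are routed by programming
+ * CFG_SETUP with the target bus/device/function and then reusing the root
+ * port's own config space window.
+ */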
+static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 reg;
+
+ reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+ CFG_FUNC(PCI_FUNC(devfn));
+ if (!pci_is_root_bus(bus->parent))
+ reg |= CFG_TYPE1;
+ ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+ return pp->va_cfg0_base + where;
+}
+
+static struct pci_ops ks_child_pcie_ops = {
+ .map_bus = ks_pcie_other_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/**
+ * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
+ * @bus: A pointer to the PCI bus structure.
+ *
+ * This sets up BAR0 to enable inbound access to the MSI_IRQ register.
+ */
+static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ if (!pci_is_root_bus(bus))
+ return 0;
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
+ return 0;
+}
+
+static struct pci_ops ks_pcie_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .add_bus = ks_pcie_v3_65_add_bus,
+};
+
+/**
+ * ks_pcie_link_up() - Check if link up
+ * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
+ * controller driver information.
+ */
+static int ks_pcie_link_up(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+ val &= PORT_LOGIC_LTSSM_STATE_MASK;
+ return (val == PORT_LOGIC_LTSSM_STATE_L0);
+}
+
+static void ks_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 val;
+
+ /* Disable Link training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~LTSSM_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
+static int ks_pcie_start_link(struct dw_pcie *pci)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 val;
+
+ /* Initiate Link Training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+
+ return 0;
+}
+
+static void ks_pcie_quirk(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ struct pci_dev *bridge;
+ static const struct pci_device_id rc_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { 0, },
+ };
+
+ if (pci_is_root_bus(bus))
+ bridge = dev;
+
+ /* look for the host bridge */
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ bus = bus->parent;
+ }
+
+ if (!bridge)
+ return;
+
+ /*
+	 * The Keystone PCI controller has a hardware limitation of a
+	 * 256 byte maximum read request size and cannot handle anything
+	 * larger, so force this limit on all downstream devices.
+ */
+ if (pci_match_id(rc_pci_devids, bridge)) {
+ if (pcie_get_readrq(dev) > 256) {
+ dev_info(&dev->dev, "limiting MRRS to 256\n");
+ pcie_set_readrq(dev, 256);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
+
+static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ unsigned int irq = desc->irq_data.hwirq;
+ struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+ u32 offset = irq - ks_pcie->msi_host_irq;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 vector, reg, pos;
+
+ dev_dbg(dev, "%s, irq %d\n", __func__, irq);
+
+ /*
+	 * The chained IRQ handler installation has replaced the normal
+	 * interrupt driver handler, so we need to take care of the
+	 * mask/unmask and ack operations ourselves.
+ */
+ chained_irq_enter(chip, desc);
+
+ reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
+ /*
+	 * MSI0 status bits 0-3 show vectors 0, 8, 16, 24; MSI1 status bits
+	 * show vectors 1, 9, 17, 25; and so forth.
+ */
+ for (pos = 0; pos < 4; pos++) {
+ if (!(reg & BIT(pos)))
+ continue;
+
+ vector = offset + (pos << 3);
+ dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
+ generic_handle_domain_irq(pp->irq_domain, vector);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+/**
+ * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * @desc: Pointer to irq descriptor
+ *
+ * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * takes care of interrupt controller level mask/ack operation.
+ */
+static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ dev_dbg(dev, ": Handling legacy irq %d\n", irq);
+
+ /*
+	 * The chained irq handler installation would have replaced the normal
+	 * interrupt driver handler, so we need to take care of the mask/unmask
+	 * and ack operations.
+ */
+ chained_irq_enter(chip, desc);
+ ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ chained_irq_exit(chip, desc);
+}
+
+static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
+{
+ struct device *dev = ks_pcie->pci->dev;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ struct irq_data *irq_data;
+ int irq_count, irq, ret, i;
+
+ if (!IS_ENABLED(CONFIG_PCI_MSI))
+ return 0;
+
+ intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
+ if (!intc_np) {
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "msi-interrupt-controller node is absent\n");
+ return -EINVAL;
+ }
+
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (!ks_pcie->msi_host_irq) {
+ irq_data = irq_get_irq_data(irq);
+ if (!irq_data) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->msi_host_irq = irq_data->hwirq;
+ }
+
+ irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
+ ks_pcie);
+ }
+
+ of_node_put(intc_np);
+ return 0;
+
+err:
+ of_node_put(intc_np);
+ return ret;
+}
+
+static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+ struct device *dev = ks_pcie->pci->dev;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ int irq_count, irq, ret = 0, i;
+
+ intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
+ if (!intc_np) {
+ /*
+ * Since legacy interrupts are modeled as edge-interrupts in
+ * AM6, keep it disabled for now.
+ */
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "legacy-interrupt-controller node is absent\n");
+ return -EINVAL;
+ }
+
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->legacy_host_irqs[i] = irq;
+
+ irq_set_chained_handler_and_data(irq,
+ ks_pcie_legacy_irq_handler,
+ ks_pcie);
+ }
+
+ legacy_irq_domain =
+ irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ &ks_pcie_legacy_irq_domain_ops, NULL);
+ if (!legacy_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->legacy_irq_domain = legacy_irq_domain;
+
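+	/* Enable INTA..INTD at the application register level */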
+ for (i = 0; i < PCI_NUM_INTX; i++)
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
+
+err:
+ of_node_put(intc_np);
+ return ret;
+}
+
+#ifdef CONFIG_ARM
+/*
+ * When a PCI device does not exist during config cycles, keystone host
+ * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
+ * This handler always returns 0 for this kind of fault.
+ */
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ unsigned long instr = *(unsigned long *) instruction_pointer(regs);
+
+ if ((instr & 0x0e100090) == 0x00100090) {
+ int reg = (instr >> 12) & 15;
+
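+		/*
+		 * Fake an all-ones read in the destination register and skip
+		 * the faulting instruction.
+		 */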
+ regs->uregs[reg] = -1;
+ regs->ARM_pc += 4;
+ }
+
+ return 0;
+}
+#endif
+
+static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+{
+ int ret;
+ unsigned int id;
+ struct regmap *devctrl_regs;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
+
+ devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
+ if (IS_ERR(devctrl_regs))
+ return PTR_ERR(devctrl_regs);
+
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ ret = regmap_read(devctrl_regs, offset, &id);
+ if (ret)
+ return ret;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ int ret;
+
+ pp->bridge->ops = &ks_pcie_ops;
+ if (!ks_pcie->is_am6)
+ pp->bridge->child_ops = &ks_child_pcie_ops;
+
+ ret = ks_pcie_config_legacy_irq(ks_pcie);
+ if (ret)
+ return ret;
+
+ ret = ks_pcie_config_msi_irq(ks_pcie);
+ if (ret)
+ return ret;
+
+ ks_pcie_stop_link(pci);
+ ks_pcie_setup_rc_app_regs(ks_pcie);
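+	/* Advertise 32-bit I/O decoding in the bridge I/O base/limit registers */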
+ writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+ pci->dbi_base + PCI_IO_BASE);
+
+ ret = ks_pcie_init_id(ks_pcie);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_ARM
+ /*
+	 * PCIe access errors that result in OCP errors are caught by ARM as
+	 * "External aborts".
+ */
+ hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
+ "Asynchronous external abort");
+#endif
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops ks_pcie_host_ops = {
+ .host_init = ks_pcie_host_init,
+ .msi_host_init = ks_pcie_msi_host_init,
+};
+
+static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
+ .host_init = ks_pcie_host_init,
+};
+
+static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
+{
+ struct keystone_pcie *ks_pcie = priv;
+
+ return ks_pcie_handle_error_irq(ks_pcie);
+}
+
+static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_write(base + reg, size, val);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+}
+
+static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+ .start_link = ks_pcie_start_link,
+ .stop_link = ks_pcie_stop_link,
+ .link_up = ks_pcie_link_up,
+ .write_dbi2 = ks_pcie_am654_write_dbi2,
+};
+
+static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ int flags;
+
+ ep->page_size = AM654_WIN_SIZE;
+ flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
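+	/*
+	 * The DBI2 write programs the BAR mask register (size - 1); the
+	 * regular DBI write then sets the BAR flags.
+	 */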
+ dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
+}
+
+static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ u8 int_pin;
+
+ int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
+ if (int_pin == 0 || int_pin > 4)
+ return;
+
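+	/*
+	 * Pulse the legacy interrupt: enable it, assert it for ~1 ms, then
+	 * deassert and disable it again.
+	 */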
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
+ INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
+ mdelay(1);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
+ INT_ENABLE);
+}
+
+static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ ks_pcie_am654_raise_legacy_irq(ks_pcie);
+ break;
+ case PCI_EPC_IRQ_MSI:
+ dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ break;
+ case PCI_EPC_IRQ_MSIX:
+ dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ break;
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features ks_pcie_am654_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[2] = SZ_1M,
+ .bar_fixed_size[3] = SZ_64K,
+ .bar_fixed_size[4] = 256,
+ .bar_fixed_size[5] = SZ_1M,
+ .align = SZ_1M,
+};
+
+static const struct pci_epc_features*
+ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
+{
+ return &ks_pcie_am654_epc_features;
+}
+
+static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
+ .ep_init = ks_pcie_am654_ep_init,
+ .raise_irq = ks_pcie_am654_raise_irq,
+ .get_features = &ks_pcie_am654_get_features,
+};
+
+static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
+{
+ int num_lanes = ks_pcie->num_lanes;
+
+ while (num_lanes--) {
+ phy_power_off(ks_pcie->phy[num_lanes]);
+ phy_exit(ks_pcie->phy[num_lanes]);
+ }
+}
+
+static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
+{
+ int i;
+ int ret;
+ int num_lanes = ks_pcie->num_lanes;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = phy_reset(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_init(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(ks_pcie->phy[i]);
+ if (ret < 0) {
+ phy_exit(ks_pcie->phy[i]);
+ goto err_phy;
+ }
+ }
+
+ return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(ks_pcie->phy[i]);
+ phy_exit(ks_pcie->phy[i]);
+ }
+
+ return ret;
+}
+
+static int ks_pcie_set_mode(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
+ val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
+
+ ret = regmap_update_bits(syscon, offset, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ks_pcie_am654_set_mode(struct device *dev,
+ enum dw_pcie_device_mode mode)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ mask = AM654_PCIE_DEV_TYPE_MASK;
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ val = RC;
+ break;
+ case DW_PCIE_EP_TYPE:
+ val = EP;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(syscon, offset, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
+ .host_ops = &ks_pcie_host_ops,
+ .version = DW_PCIE_VER_365A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
+ .host_ops = &ks_pcie_am654_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
+ .version = DW_PCIE_VER_490A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
+ .ep_ops = &ks_pcie_am654_ep_ops,
+ .mode = DW_PCIE_EP_TYPE,
+ .version = DW_PCIE_VER_490A,
+};
+
+static const struct of_device_id ks_pcie_of_match[] = {
+ {
+ .type = "pci",
+ .data = &ks_pcie_rc_of_data,
+ .compatible = "ti,keystone-pcie",
+ },
+ {
+ .data = &ks_pcie_am654_rc_of_data,
+ .compatible = "ti,am654-pcie-rc",
+ },
+ {
+ .data = &ks_pcie_am654_ep_of_data,
+ .compatible = "ti,am654-pcie-ep",
+ },
+ { },
+};
+
+static int ks_pcie_probe(struct platform_device *pdev)
+{
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct ks_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+ struct dw_pcie *pci;
+ struct keystone_pcie *ks_pcie;
+ struct device_link **link;
+ struct gpio_desc *gpiod;
+ struct resource *res;
+ void __iomem *base;
+ u32 num_viewport;
+ struct phy **phy;
+ u32 num_lanes;
+ char name[10];
+ u32 version;
+ int ret;
+ int irq;
+ int i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ version = data->version;
+ host_ops = data->host_ops;
+ ep_ops = data->ep_ops;
+ mode = data->mode;
+
+ ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
+ if (!ks_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ ks_pcie->app = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
+ base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
+ ks_pcie->is_am6 = true;
+
+ pci->dbi_base = base;
+ pci->dbi_base2 = base;
+ pci->dev = dev;
+ pci->ops = &ks_pcie_dw_pcie_ops;
+ pci->version = version;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+ "ks-pcie-error-irq", ks_pcie);
+ if (ret < 0) {
+ dev_err(dev, "failed to request error IRQ %d\n",
+ irq);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "num-lanes", &num_lanes);
+ if (ret)
+ num_lanes = 1;
+
+ phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ for (i = 0; i < num_lanes; i++) {
+ snprintf(name, sizeof(name), "pcie-phy%d", i);
+ phy[i] = devm_phy_optional_get(dev, name);
+ if (IS_ERR(phy[i])) {
+ ret = PTR_ERR(phy[i]);
+ goto err_link;
+ }
+
+ if (!phy[i])
+ continue;
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
+ }
+
+ ks_pcie->np = np;
+ ks_pcie->pci = pci;
+ ks_pcie->link = link;
+ ks_pcie->num_lanes = num_lanes;
+ ks_pcie->phy = phy;
+
+ gpiod = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset GPIO\n");
+ goto err_link;
+ }
+
+	/* Take runtime PM references on the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_get_sync(ks_pcie->phy[i]);
+
+ ret = ks_pcie_enable_phy(ks_pcie);
+
+	/* Release runtime PM references on the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_put_sync(ks_pcie->phy[i]);
+
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ goto err_link;
+ }
+
+ platform_set_drvdata(pdev, ks_pcie);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
+
+ if (dw_pcie_ver_is_ge(pci, 480A))
+ ret = ks_pcie_am654_set_mode(dev, mode);
+ else
+ ret = ks_pcie_set_mode(dev);
+ if (ret < 0)
+ goto err_get_sync;
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-viewport* property\n");
+ goto err_get_sync;
+ }
+
+ /*
+ * "Power Sequencing and Reset Signal Timings" table in
+ * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
+	 * indicates PERST# should be deasserted after a minimum of 100 us
+ * once REFCLK is stable. The REFCLK to the connector in RC
+ * mode is selected while enabling the PHY. So deassert PERST#
+ * after 100 us.
+ */
+ if (gpiod) {
+ usleep_range(100, 200);
+ gpiod_set_value_cansleep(gpiod, 1);
+ }
+
+ ks_pcie->num_viewport = num_viewport;
+ pci->pp.ops = host_ops;
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ pci->ep.ops = ep_ops;
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ }
+
+ ks_pcie_enable_error_irq(ks_pcie);
+
+ return 0;
+
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ ks_pcie_disable_phy(ks_pcie);
+
+err_link:
+ while (--i >= 0 && link[i])
+ device_link_del(link[i]);
+
+ return ret;
+}
+
+static int ks_pcie_remove(struct platform_device *pdev)
+{
+ struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ struct device_link **link = ks_pcie->link;
+ int num_lanes = ks_pcie->num_lanes;
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ ks_pcie_disable_phy(ks_pcie);
+ while (num_lanes--)
+ device_link_del(link[num_lanes]);
+
+ return 0;
+}
+
+static struct platform_driver ks_pcie_driver = {
+ .probe = ks_pcie_probe,
+ .remove = ks_pcie_remove,
+ .driver = {
+ .name = "keystone-pcie",
+ .of_match_table = ks_pcie_of_match,
+ },
+};
+builtin_platform_driver(ks_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
new file mode 100644
index 0000000000..b1faf41a2f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe controller EP driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2018 NXP Semiconductor.
+ *
+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+#define PEX_PF0_CONFIG 0xC0014
+#define PEX_PF0_CFG_READY BIT(0)
+
+/* PEX PFa PCIE PME and message interrupt registers */
+#define PEX_PF0_PME_MES_DR 0xC0020
+#define PEX_PF0_PME_MES_DR_LUD BIT(7)
+#define PEX_PF0_PME_MES_DR_LDD BIT(9)
+#define PEX_PF0_PME_MES_DR_HRD BIT(10)
+
+#define PEX_PF0_PME_MES_IER 0xC0028
+#define PEX_PF0_PME_MES_IER_LUDIE BIT(7)
+#define PEX_PF0_PME_MES_IER_LDDIE BIT(9)
+#define PEX_PF0_PME_MES_IER_HRDIE BIT(10)
+
+#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+struct ls_pcie_ep_drvdata {
+ u32 func_offset;
+ const struct dw_pcie_ep_ops *ops;
+ const struct dw_pcie_ops *dw_pcie_ops;
+};
+
+struct ls_pcie_ep {
+ struct dw_pcie *pci;
+ struct pci_epc_features *ls_epc;
+ const struct ls_pcie_ep_drvdata *drvdata;
+ int irq;
+ u32 lnkcap;
+ bool big_endian;
+};
+
+static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pcie->big_endian)
+ return ioread32be(pci->dbi_base + offset);
+ else
+ return ioread32(pci->dbi_base + offset);
+}
+
+static void ls_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pcie->big_endian)
+ iowrite32be(value, pci->dbi_base + offset);
+ else
+ iowrite32(value, pci->dbi_base + offset);
+}
+
+static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
+{
+ struct ls_pcie_ep *pcie = dev_id;
+ struct dw_pcie *pci = pcie->pci;
+ u32 val, cfg;
+ u8 offset;
+
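+	/* Read the pending PME/message events and write the value back to acknowledge them */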
+ val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR);
+ ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
+
+ if (!val)
+ return IRQ_NONE;
+
+ if (val & PEX_PF0_PME_MES_DR_LUD) {
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ /*
+ * The values of the Maximum Link Width and Supported Link
+ * Speed from the Link Capabilities Register will be lost
+	 * during link down or hot reset. Restore the initial value that
+	 * was configured by the Reset Configuration Word (RCW).
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ cfg = ls_lut_readl(pcie, PEX_PF0_CONFIG);
+ cfg |= PEX_PF0_CFG_READY;
+ ls_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
+ dw_pcie_ep_linkup(&pci->ep);
+
+ dev_dbg(pci->dev, "Link up\n");
+ } else if (val & PEX_PF0_PME_MES_DR_LDD) {
+ dev_dbg(pci->dev, "Link down\n");
+ pci_epc_linkdown(pci->ep.epc);
+ } else if (val & PEX_PF0_PME_MES_DR_HRD) {
+ dev_dbg(pci->dev, "Hot reset\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie,
+ struct platform_device *pdev)
+{
+ u32 val;
+ int ret;
+
+ pcie->irq = platform_get_irq_byname(pdev, "pme");
+ if (pcie->irq < 0)
+ return pcie->irq;
+
+ ret = devm_request_irq(&pdev->dev, pcie->irq, ls_pcie_ep_event_handler,
+ IRQF_SHARED, pdev->name, pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register PCIe IRQ\n");
+ return ret;
+ }
+
+ /* Enable interrupts */
+ val = ls_lut_readl(pcie, PEX_PF0_PME_MES_IER);
+ val |= PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE |
+ PEX_PF0_PME_MES_IER_LUDIE;
+ ls_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
+
+ return 0;
+}
+
+static const struct pci_epc_features*
+ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ return pcie->ls_epc;
+}
+
+static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+ struct dw_pcie_ep_func *ep_func;
+ enum pci_barno bar;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, 0);
+ if (!ep_func)
+ return;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+
+ pcie->ls_epc->msi_capable = ep_func->msi_cap ? true : false;
+ pcie->ls_epc->msix_capable = ep_func->msix_cap ? true : false;
+}
+
+static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
+ interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ WARN_ON(func_no && !pcie->drvdata->func_offset);
+ return pcie->drvdata->func_offset * func_no;
+}
+
+static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
+ .ep_init = ls_pcie_ep_init,
+ .raise_irq = ls_pcie_ep_raise_irq,
+ .get_features = ls_pcie_ep_get_features,
+ .func_conf_select = ls_pcie_ep_func_conf_select,
+};
+
+static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
+ .ops = &ls_pcie_ep_ops,
+};
+
+static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
+ .func_offset = 0x20000,
+ .ops = &ls_pcie_ep_ops,
+};
+
+static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
+ .func_offset = 0x8000,
+ .ops = &ls_pcie_ep_ops,
+};
+
+static const struct of_device_id ls_pcie_ep_of_match[] = {
+ { .compatible = "fsl,ls1028a-pcie-ep", .data = &ls1_ep_drvdata },
+ { .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata },
+ { .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,lx2160ar2-pcie-ep", .data = &lx2_ep_drvdata },
+ { },
+};
+
+static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct ls_pcie_ep *pcie;
+ struct pci_epc_features *ls_epc;
+ struct resource *dbi_base;
+ u8 offset;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ ls_epc = devm_kzalloc(dev, sizeof(*ls_epc), GFP_KERNEL);
+ if (!ls_epc)
+ return -ENOMEM;
+
+ pcie->drvdata = of_device_get_match_data(dev);
+
+ pci->dev = dev;
+ pci->ops = pcie->drvdata->dw_pcie_ops;
+
+ ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
+ ls_epc->linkup_notifier = true;
+
+ pcie->pci = pci;
+ pcie->ls_epc = ls_epc;
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ pci->ep.ops = &ls_pcie_ep_ops;
+
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ platform_set_drvdata(pdev, pcie);
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ pcie->lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ return ls_pcie_ep_interrupt_init(pcie, pdev);
+}
+
+static struct platform_driver ls_pcie_ep_driver = {
+ .driver = {
+ .name = "layerscape-pcie-ep",
+ .of_match_table = ls_pcie_ep_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
new file mode 100644
index 0000000000..b931d59765
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2014 Freescale Semiconductor.
+ * Copyright 2021 NXP
+ *
+ * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* PEX Internal Configuration Registers */
+#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
+#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
+
+/* PF Message Command Register */
+#define LS_PCIE_PF_MCR 0x2c
+#define PF_MCR_PTOMR BIT(0)
+#define PF_MCR_EXL2S BIT(1)
+
+#define PCIE_IATU_NUM 6
+
+struct ls_pcie_drvdata {
+ const u32 pf_off;
+ bool pm_support;
+};
+
+struct ls_pcie {
+ struct dw_pcie *pci;
+ const struct ls_pcie_drvdata *drvdata;
+ void __iomem *pf_base;
+ bool big_endian;
+};
+
+#define ls_pcie_pf_readl_addr(addr) ls_pcie_pf_readl(pcie, addr)
+#define to_ls_pcie(x) dev_get_drvdata((x)->dev)
+
+static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u32 header_type;
+
+ header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE);
+ header_type &= 0x7f;
+
+ return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+/* Clear multi-function bit */
+static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE);
+}
+
+/* Drop MSG TLP except for Vendor MSG */
+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
+{
+ u32 val;
+ struct dw_pcie *pci = pcie->pci;
+
+ val = ioread32(pci->dbi_base + PCIE_STRFMR1);
+ val &= 0xDFFFFFFF;
+ iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
+}
+
+/* Forward error response of outbound non-posted requests */
+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
+}
+
+static u32 ls_pcie_pf_readl(struct ls_pcie *pcie, u32 off)
+{
+ if (pcie->big_endian)
+ return ioread32be(pcie->pf_base + off);
+
+ return ioread32(pcie->pf_base + off);
+}
+
+static void ls_pcie_pf_writel(struct ls_pcie *pcie, u32 off, u32 val)
+{
+ if (pcie->big_endian)
+ iowrite32be(val, pcie->pf_base + off);
+ else
+ iowrite32(val, pcie->pf_base + off);
+}
+
+static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+ int ret;
+
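+	/*
+	 * Request a PME_Turn_Off message and poll until the hardware clears
+	 * the trigger bit.
+	 */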
+ val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_PTOMR;
+ ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_PTOMR),
+				 PCIE_PME_TO_L2_TIMEOUT_US / 10,
+ PCIE_PME_TO_L2_TIMEOUT_US);
+ if (ret)
+ dev_err(pcie->pci->dev, "PME_Turn_off timeout\n");
+}
+
+static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+ int ret;
+
+ /*
+ * Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link
+ * to exit L2 state.
+ */
+ val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_EXL2S;
+ ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ /*
+	 * The 10 ms L2 exit timeout is not defined in the specifications;
+	 * it was chosen based on empirical observations.
+ */
+ ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_EXL2S),
+ 1000,
+ 10000);
+ if (ret)
+ dev_err(pcie->pci->dev, "L2 exit timeout\n");
+}
+
+static int ls_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ ls_pcie_fix_error_response(pcie);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ ls_pcie_clear_multifunction(pcie);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ ls_pcie_drop_msg_tlp(pcie);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops ls_pcie_host_ops = {
+ .host_init = ls_pcie_host_init,
+ .pme_turn_off = ls_pcie_send_turnoff_msg,
+};
+
+static const struct ls_pcie_drvdata ls1021a_drvdata = {
+ .pm_support = false,
+};
+
+static const struct ls_pcie_drvdata layerscape_drvdata = {
+ .pf_off = 0xc0000,
+ .pm_support = true,
+};
+
+static const struct of_device_id ls_pcie_of_match[] = {
+ { .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata },
+ { .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1021a_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2088a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1088a-pcie", .data = &layerscape_drvdata },
+ { },
+};
+
+static int ls_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct ls_pcie *pcie;
+ struct resource *dbi_base;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pcie->drvdata = of_device_get_match_data(dev);
+
+ pci->dev = dev;
+ pci->pp.ops = &ls_pcie_host_ops;
+
+ pcie->pci = pci;
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ pcie->pf_base = pci->dbi_base + pcie->drvdata->pf_off;
+
+ if (!ls_pcie_is_bridge(pcie))
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static int ls_pcie_suspend_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+
+ if (!pcie->drvdata->pm_support)
+ return 0;
+
+ return dw_pcie_suspend_noirq(pcie->pci);
+}
+
+static int ls_pcie_resume_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+
+ if (!pcie->drvdata->pm_support)
+ return 0;
+
+ ls_pcie_exit_from_l2(&pcie->pci->pp);
+
+ return dw_pcie_resume_noirq(pcie->pci);
+}
+
+static const struct dev_pm_ops ls_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(ls_pcie_suspend_noirq, ls_pcie_resume_noirq)
+};
+
+static struct platform_driver ls_pcie_driver = {
+ .probe = ls_pcie_probe,
+ .driver = {
+ .name = "layerscape-pcie",
+ .of_match_table = ls_pcie_of_match,
+ .suppress_bind_attrs = true,
+ .pm = &ls_pcie_pm_ops,
+ },
+};
+builtin_platform_driver(ls_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
new file mode 100644
index 0000000000..407558f5d7
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amlogic MESON SoCs
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Yue Wang <yue.wang@amlogic.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/phy/phy.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "pcie-designware.h"
+
+#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5)
+#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12)
+
+/* PCIe specific config registers */
+#define PCIE_CFG0 0x0
+#define APP_LTSSM_ENABLE BIT(7)
+
+#define PCIE_CFG_STATUS12 0x30
+#define IS_SMLH_LINK_UP(x) ((x) & (1 << 6))
+#define IS_RDLH_LINK_UP(x) ((x) & (1 << 16))
+#define IS_LTSSM_UP(x) ((((x) >> 10) & 0x1f) == 0x11)
+
+#define PCIE_CFG_STATUS17 0x44
+#define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1)
+
+#define WAIT_LINKUP_TIMEOUT 4000
+#define PORT_CLK_RATE 100000000UL
+#define MAX_PAYLOAD_SIZE 256
+#define MAX_READ_REQ_SIZE 256
+#define PCIE_RESET_DELAY 500
+#define PCIE_SHARED_RESET 1
+#define PCIE_NORMAL_RESET 0
+
+enum pcie_data_rate {
+ PCIE_GEN1,
+ PCIE_GEN2,
+ PCIE_GEN3,
+ PCIE_GEN4
+};
+
+struct meson_pcie_clk_res {
+ struct clk *clk;
+ struct clk *port_clk;
+ struct clk *general_clk;
+};
+
+struct meson_pcie_rc_reset {
+ struct reset_control *port;
+ struct reset_control *apb;
+};
+
+struct meson_pcie {
+ struct dw_pcie pci;
+ void __iomem *cfg_base;
+ struct meson_pcie_clk_res clk_res;
+ struct meson_pcie_rc_reset mrst;
+ struct gpio_desc *reset_gpio;
+ struct phy *phy;
+};
+
+static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
+ const char *id,
+ u32 reset_type)
+{
+ struct device *dev = mp->pci.dev;
+ struct reset_control *reset;
+
+ if (reset_type == PCIE_SHARED_RESET)
+ reset = devm_reset_control_get_shared(dev, id);
+ else
+ reset = devm_reset_control_get(dev, id);
+
+ return reset;
+}
+
+static int meson_pcie_get_resets(struct meson_pcie *mp)
+{
+ struct meson_pcie_rc_reset *mrst = &mp->mrst;
+
+ mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
+ if (IS_ERR(mrst->port))
+ return PTR_ERR(mrst->port);
+ reset_control_deassert(mrst->port);
+
+ mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET);
+ if (IS_ERR(mrst->apb))
+ return PTR_ERR(mrst->apb);
+ reset_control_deassert(mrst->apb);
+
+ return 0;
+}
+
+static int meson_pcie_get_mems(struct platform_device *pdev,
+ struct meson_pcie *mp)
+{
+ struct dw_pcie *pci = &mp->pci;
+
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(mp->cfg_base))
+ return PTR_ERR(mp->cfg_base);
+
+ return 0;
+}
+
+static int meson_pcie_power_on(struct meson_pcie *mp)
+{
+ int ret = 0;
+
+ ret = phy_init(mp->phy);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(mp->phy);
+ if (ret) {
+ phy_exit(mp->phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void meson_pcie_power_off(struct meson_pcie *mp)
+{
+ phy_power_off(mp->phy);
+ phy_exit(mp->phy);
+}
+
+static int meson_pcie_reset(struct meson_pcie *mp)
+{
+ struct meson_pcie_rc_reset *mrst = &mp->mrst;
+ int ret = 0;
+
+ ret = phy_reset(mp->phy);
+ if (ret)
+ return ret;
+
+ reset_control_assert(mrst->port);
+ reset_control_assert(mrst->apb);
+ udelay(PCIE_RESET_DELAY);
+ reset_control_deassert(mrst->port);
+ reset_control_deassert(mrst->apb);
+ udelay(PCIE_RESET_DELAY);
+
+ return 0;
+}
+
+static inline void meson_pcie_disable_clock(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static inline struct clk *meson_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret) {
+ dev_err(dev, "set clk rate failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clk\n");
+ return ERR_PTR(ret);
+ }
+
+ devm_add_action_or_reset(dev, meson_pcie_disable_clock, clk);
+
+ return clk;
+}
+
+static int meson_pcie_probe_clocks(struct meson_pcie *mp)
+{
+ struct device *dev = mp->pci.dev;
+ struct meson_pcie_clk_res *res = &mp->clk_res;
+
+ res->port_clk = meson_pcie_probe_clock(dev, "port", PORT_CLK_RATE);
+ if (IS_ERR(res->port_clk))
+ return PTR_ERR(res->port_clk);
+
+ res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
+ if (IS_ERR(res->general_clk))
+ return PTR_ERR(res->general_clk);
+
+ res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
+ if (IS_ERR(res->clk))
+ return PTR_ERR(res->clk);
+
+ return 0;
+}
+
+static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
+{
+ return readl(mp->cfg_base + reg);
+}
+
+static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
+{
+ writel(val, mp->cfg_base + reg);
+}
+
+static void meson_pcie_assert_reset(struct meson_pcie *mp)
+{
+ gpiod_set_value_cansleep(mp->reset_gpio, 1);
+ udelay(500);
+ gpiod_set_value_cansleep(mp->reset_gpio, 0);
+}
+
+static void meson_pcie_ltssm_enable(struct meson_pcie *mp)
+{
+ u32 val;
+
+ val = meson_cfg_readl(mp, PCIE_CFG0);
+ val |= APP_LTSSM_ENABLE;
+ meson_cfg_writel(mp, val, PCIE_CFG0);
+}
+
+static int meson_size_to_payload(struct meson_pcie *mp, int size)
+{
+ struct device *dev = mp->pci.dev;
+
+ /*
+	 * The DWC core supports payload sizes of 2^(val + 7) bytes, where val
+	 * ranges from 0 to 5 and defaults to 1. If the requested size is not a
+	 * power of two, or is smaller than 2^7 or larger than 2^12, fall back
+	 * to the default of 2^(1 + 7) = 256 bytes.
+ */
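+	/* e.g. size 256: fls(256) - 8 = 9 - 8 = 1, i.e. 2^(1 + 7) = 256 bytes */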
+ if (!is_power_of_2(size) || size < 128 || size > 4096) {
+ dev_warn(dev, "payload size %d, set to default 256\n", size);
+ return 1;
+ }
+
+ return fls(size) - 8;
+}
+
+static void meson_set_max_payload(struct meson_pcie *mp, int size)
+{
+ struct dw_pcie *pci = &mp->pci;
+ u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ int max_payload_size = meson_size_to_payload(mp, size);
+
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
+
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
+}
+
+static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
+{
+ struct dw_pcie *pci = &mp->pci;
+ u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ int max_rd_req_size = meson_size_to_payload(mp, size);
+
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
+
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
+}
+
+static int meson_pcie_start_link(struct dw_pcie *pci)
+{
+ struct meson_pcie *mp = to_meson_pcie(pci);
+
+ meson_pcie_ltssm_enable(mp);
+ meson_pcie_assert_reset(mp);
+
+ return 0;
+}
+
+static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ int ret;
+
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ /*
+ * There is a bug in the MESON AXG PCIe controller whereby software
+ * cannot program the PCI_CLASS_DEVICE register, so we must fabricate
+ * the return value in the config accessors.
+ */
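+	/*
+	 * For sub-word reads, widen the value to its dword position, patch in
+	 * the bridge class code, then narrow it back to the requested size.
+	 */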
+ if ((where & ~3) == PCI_CLASS_REVISION) {
+ if (size <= 2)
+ *val = (*val & ((1 << (size * 8)) - 1)) << (8 * (where & 3));
+ *val &= ~0xffffff00;
+ *val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops meson_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = meson_pcie_rd_own_conf,
+ .write = pci_generic_config_write,
+};
+
+static int meson_pcie_link_up(struct dw_pcie *pci)
+{
+ struct meson_pcie *mp = to_meson_pcie(pci);
+ struct device *dev = pci->dev;
+ u32 speed_okay = 0;
+ u32 cnt = 0;
+ u32 state12, state17, smlh_up, ltssm_up, rdlh_up;
+
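+	/* Poll every 10 us, for up to WAIT_LINKUP_TIMEOUT iterations (~40 ms) */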
+ do {
+ state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
+ state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
+ smlh_up = IS_SMLH_LINK_UP(state12);
+ rdlh_up = IS_RDLH_LINK_UP(state12);
+ ltssm_up = IS_LTSSM_UP(state12);
+
+ if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
+ speed_okay = 1;
+
+ if (smlh_up)
+ dev_dbg(dev, "smlh_link_up is on\n");
+ if (rdlh_up)
+ dev_dbg(dev, "rdlh_link_up is on\n");
+ if (ltssm_up)
+ dev_dbg(dev, "ltssm_up is on\n");
+ if (speed_okay)
+ dev_dbg(dev, "speed_okay\n");
+
+ if (smlh_up && rdlh_up && ltssm_up && speed_okay)
+ return 1;
+
+ cnt++;
+
+ udelay(10);
+ } while (cnt < WAIT_LINKUP_TIMEOUT);
+
+ dev_err(dev, "error: wait linkup timeout\n");
+ return 0;
+}
+
+static int meson_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct meson_pcie *mp = to_meson_pcie(pci);
+
+ pp->bridge->ops = &meson_pci_ops;
+
+ meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+ meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops meson_pcie_host_ops = {
+ .host_init = meson_pcie_host_init,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = meson_pcie_link_up,
+ .start_link = meson_pcie_start_link,
+};
+
+static int meson_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct meson_pcie *mp;
+ int ret;
+
+ mp = devm_kzalloc(dev, sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ return -ENOMEM;
+
+ pci = &mp->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &meson_pcie_host_ops;
+ pci->num_lanes = 1;
+
+ mp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(mp->phy)) {
+ dev_err(dev, "get phy failed, %ld\n", PTR_ERR(mp->phy));
+ return PTR_ERR(mp->phy);
+ }
+
+ mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(mp->reset_gpio)) {
+ dev_err(dev, "get reset gpio failed\n");
+ return PTR_ERR(mp->reset_gpio);
+ }
+
+ ret = meson_pcie_get_resets(mp);
+ if (ret) {
+ dev_err(dev, "get reset resource failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_get_mems(pdev, mp);
+ if (ret) {
+ dev_err(dev, "get memory resource failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_power_on(mp);
+ if (ret) {
+ dev_err(dev, "phy power on failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_reset(mp);
+ if (ret) {
+ dev_err(dev, "reset failed, %d\n", ret);
+ goto err_phy;
+ }
+
+ ret = meson_pcie_probe_clocks(mp);
+ if (ret) {
+ dev_err(dev, "init clock resources failed, %d\n", ret);
+ goto err_phy;
+ }
+
+ platform_set_drvdata(pdev, mp);
+
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0) {
+ dev_err(dev, "Add PCIe port failed, %d\n", ret);
+ goto err_phy;
+ }
+
+ return 0;
+
+err_phy:
+ meson_pcie_power_off(mp);
+ return ret;
+}
+
+static const struct of_device_id meson_pcie_of_match[] = {
+ {
+ .compatible = "amlogic,axg-pcie",
+ },
+ {
+ .compatible = "amlogic,g12a-pcie",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, meson_pcie_of_match);
+
+static struct platform_driver meson_pcie_driver = {
+ .probe = meson_pcie_probe,
+ .driver = {
+ .name = "meson-pcie",
+ .of_match_table = meson_pcie_of_match,
+ },
+};
+
+module_platform_driver(meson_pcie_driver);
+
+MODULE_AUTHOR("Yue Wang <yue.wang@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic PCIe Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
new file mode 100644
index 0000000000..b8cb77c9c4
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips
+ * such as Graviton and Alpine)
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Author: Jonathan Chocron <jonnyc@amazon.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pci-acpi.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+struct al_pcie_acpi {
+ void __iomem *dbi_base;
+};
+
+static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct al_pcie_acpi *pcie = cfg->priv;
+ void __iomem *dbi_base = pcie->dbi_base;
+
+ if (bus->number == cfg->busr.start) {
+ /*
+ * The DW PCIe core doesn't filter out transactions to other
+ * devices/functions on the root bus num, so we do this here.
+ */
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+ else
+ return dbi_base + where;
+ }
+
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int al_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct al_pcie_acpi *al_pcie;
+ struct resource *res;
+ int ret;
+
+ al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+ if (!al_pcie)
+ return -ENOMEM;
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc dbi base address for SEG %d\n",
+ root->segment);
+ return ret;
+ }
+
+ dev_dbg(dev, "Root port dbi res: %pR\n", res);
+
+ al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(al_pcie->dbi_base))
+ return PTR_ERR(al_pcie->dbi_base);
+
+ cfg->priv = al_pcie;
+
+ return 0;
+}
+
+const struct pci_ecam_ops al_pcie_ops = {
+ .init = al_pcie_init,
+ .pci_ops = {
+ .map_bus = al_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
+
+#ifdef CONFIG_PCIE_AL
+
+#include <linux/of_pci.h>
+#include "pcie-designware.h"
+
+#define AL_PCIE_REV_ID_2 2
+#define AL_PCIE_REV_ID_3 3
+#define AL_PCIE_REV_ID_4 4
+
+#define AXI_BASE_OFFSET 0x0
+
+#define DEVICE_ID_OFFSET 0x16c
+
+#define DEVICE_REV_ID 0x0
+#define DEVICE_REV_ID_DEV_ID_MASK GENMASK(31, 16)
+
+#define DEVICE_REV_ID_DEV_ID_X4 0
+#define DEVICE_REV_ID_DEV_ID_X8 2
+#define DEVICE_REV_ID_DEV_ID_X16 4
+
+#define OB_CTRL_REV1_2_OFFSET 0x0040
+#define OB_CTRL_REV3_5_OFFSET 0x0030
+
+#define CFG_TARGET_BUS 0x0
+#define CFG_TARGET_BUS_MASK_MASK GENMASK(7, 0)
+#define CFG_TARGET_BUS_BUSNUM_MASK GENMASK(15, 8)
+
+#define CFG_CONTROL 0x4
+#define CFG_CONTROL_SUBBUS_MASK GENMASK(15, 8)
+#define CFG_CONTROL_SEC_BUS_MASK GENMASK(23, 16)
+
+struct al_pcie_reg_offsets {
+ unsigned int ob_ctrl;
+};
+
+struct al_pcie_target_bus_cfg {
+ u8 reg_val;
+ u8 reg_mask;
+ u8 ecam_mask;
+};
+
+struct al_pcie {
+ struct dw_pcie *pci;
+ void __iomem *controller_base; /* base of PCIe unit (not DW core) */
+ struct device *dev;
+ resource_size_t ecam_size;
+ unsigned int controller_rev_id;
+ struct al_pcie_reg_offsets reg_offsets;
+ struct al_pcie_target_bus_cfg target_bus_cfg;
+};
+
+#define to_al_pcie(x) dev_get_drvdata((x)->dev)
+
+static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset)
+{
+ return readl_relaxed(pcie->controller_base + offset);
+}
+
+static inline void al_pcie_controller_writel(struct al_pcie *pcie, u32 offset,
+ u32 val)
+{
+ writel_relaxed(val, pcie->controller_base + offset);
+}
+
+static int al_pcie_rev_id_get(struct al_pcie *pcie, unsigned int *rev_id)
+{
+ u32 dev_rev_id_val;
+ u32 dev_id_val;
+
+ dev_rev_id_val = al_pcie_controller_readl(pcie, AXI_BASE_OFFSET +
+ DEVICE_ID_OFFSET +
+ DEVICE_REV_ID);
+ dev_id_val = FIELD_GET(DEVICE_REV_ID_DEV_ID_MASK, dev_rev_id_val);
+
+ switch (dev_id_val) {
+ case DEVICE_REV_ID_DEV_ID_X4:
+ *rev_id = AL_PCIE_REV_ID_2;
+ break;
+ case DEVICE_REV_ID_DEV_ID_X8:
+ *rev_id = AL_PCIE_REV_ID_3;
+ break;
+ case DEVICE_REV_ID_DEV_ID_X16:
+ *rev_id = AL_PCIE_REV_ID_4;
+ break;
+ default:
+ dev_err(pcie->dev, "Unsupported dev_id_val (0x%x)\n",
+ dev_id_val);
+ return -EINVAL;
+ }
+
+ dev_dbg(pcie->dev, "dev_id_val: 0x%x\n", dev_id_val);
+
+ return 0;
+}
+
+static int al_pcie_reg_offsets_set(struct al_pcie *pcie)
+{
+ switch (pcie->controller_rev_id) {
+ case AL_PCIE_REV_ID_2:
+ pcie->reg_offsets.ob_ctrl = OB_CTRL_REV1_2_OFFSET;
+ break;
+ case AL_PCIE_REV_ID_3:
+ case AL_PCIE_REV_ID_4:
+ pcie->reg_offsets.ob_ctrl = OB_CTRL_REV3_5_OFFSET;
+ break;
+ default:
+ dev_err(pcie->dev, "Unsupported controller rev_id: 0x%x\n",
+ pcie->controller_rev_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
+ u8 target_bus,
+ u8 mask_target_bus)
+{
+ u32 reg;
+
+ reg = FIELD_PREP(CFG_TARGET_BUS_MASK_MASK, mask_target_bus) |
+ FIELD_PREP(CFG_TARGET_BUS_BUSNUM_MASK, target_bus);
+
+ al_pcie_controller_writel(pcie, AXI_BASE_OFFSET +
+ pcie->reg_offsets.ob_ctrl + CFG_TARGET_BUS,
+ reg);
+}
+
+static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
+ unsigned int busnr = bus->number;
+ struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
+ unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
+ unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
+
+ if (busnr_reg != target_bus_cfg->reg_val) {
+ dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n",
+ target_bus_cfg->reg_val, busnr_reg);
+ target_bus_cfg->reg_val = busnr_reg;
+ al_pcie_target_bus_set(pcie,
+ target_bus_cfg->reg_val,
+ target_bus_cfg->reg_mask);
+ }
+
+ return pp->va_cfg0_base + PCIE_ECAM_OFFSET(busnr_ecam, devfn, where);
+}
+
+static struct pci_ops al_child_pci_ops = {
+ .map_bus = al_pcie_conf_addr_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void al_pcie_config_prepare(struct al_pcie *pcie)
+{
+ struct al_pcie_target_bus_cfg *target_bus_cfg;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ unsigned int ecam_bus_mask;
+ u32 cfg_control_offset;
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ u32 cfg_control;
+ u32 reg;
+ struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+
+ target_bus_cfg = &pcie->target_bus_cfg;
+
+ ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
+ if (ecam_bus_mask > 255) {
+ dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256\n");
+ ecam_bus_mask = 255;
+ }
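+	/* e.g. a 16 MB ECAM window spans 16 buses, giving ecam_mask = 0xf */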
+
+ /* This portion is taken from the transaction address */
+ target_bus_cfg->ecam_mask = ecam_bus_mask;
+ /* This portion is taken from the cfg_target_bus reg */
+ target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask;
+ target_bus_cfg->reg_val = bus->start & target_bus_cfg->reg_mask;
+
+ al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,
+ target_bus_cfg->reg_mask);
+
+ secondary_bus = bus->start + 1;
+ subordinate_bus = bus->end;
+
+ /* Set the valid values of secondary and subordinate buses */
+ cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl +
+ CFG_CONTROL;
+
+ cfg_control = al_pcie_controller_readl(pcie, cfg_control_offset);
+
+ reg = cfg_control &
+ ~(CFG_CONTROL_SEC_BUS_MASK | CFG_CONTROL_SUBBUS_MASK);
+
+ reg |= FIELD_PREP(CFG_CONTROL_SUBBUS_MASK, subordinate_bus) |
+ FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
+
+ al_pcie_controller_writel(pcie, cfg_control_offset, reg);
+}
+
+static int al_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct al_pcie *pcie = to_al_pcie(pci);
+ int rc;
+
+ pp->bridge->child_ops = &al_child_pci_ops;
+
+ rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);
+ if (rc)
+ return rc;
+
+ rc = al_pcie_reg_offsets_set(pcie);
+ if (rc)
+ return rc;
+
+ al_pcie_config_prepare(pcie);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops al_pcie_host_ops = {
+ .host_init = al_pcie_host_init,
+};
+
+static int al_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *controller_res;
+ struct resource *ecam_res;
+ struct al_pcie *al_pcie;
+ struct dw_pcie *pci;
+
+ al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+ if (!al_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->pp.ops = &al_pcie_host_ops;
+
+ al_pcie->pci = pci;
+ al_pcie->dev = dev;
+
+ ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (!ecam_res) {
+ dev_err(dev, "couldn't find 'config' reg in DT\n");
+ return -ENOENT;
+ }
+ al_pcie->ecam_size = resource_size(ecam_res);
+
+ controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "controller");
+ al_pcie->controller_base = devm_ioremap_resource(dev, controller_res);
+ if (IS_ERR(al_pcie->controller_base)) {
+ dev_err(dev, "couldn't remap controller base %pR\n",
+ controller_res);
+ return PTR_ERR(al_pcie->controller_base);
+ }
+
+ dev_dbg(dev, "From DT: controller_base: %pR\n", controller_res);
+
+ platform_set_drvdata(pdev, al_pcie);
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static const struct of_device_id al_pcie_of_match[] = {
+ { .compatible = "amazon,al-alpine-v2-pcie",
+ },
+ { .compatible = "amazon,al-alpine-v3-pcie",
+ },
+ {},
+};
+
+static struct platform_driver al_pcie_driver = {
+ .driver = {
+ .name = "al-pcie",
+ .of_match_table = al_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = al_pcie_probe,
+};
+builtin_platform_driver(al_pcie_driver);
+
+#endif /* CONFIG_PCIE_AL */
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
new file mode 100644
index 0000000000..5c999e15c3
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Marvell Armada-8K SoCs
+ *
+ * Armada-8K PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2016 Marvell Technology Group Ltd.
+ *
+ * Author: Yehuda Yitshak <yehuday@marvell.com>
+ * Author: Shadi Ammouri <shadi@marvell.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+
+#include "pcie-designware.h"
+
+#define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4
+
+struct armada8k_pcie {
+ struct dw_pcie *pci;
+ struct clk *clk;
+ struct clk *clk_reg;
+ struct phy *phy[ARMADA8K_PCIE_MAX_LANES];
+ unsigned int phy_count;
+};
+
+#define PCIE_VENDOR_REGS_OFFSET 0x8000
+
+#define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0)
+#define PCIE_APP_LTSSM_EN BIT(2)
+#define PCIE_DEVICE_TYPE_SHIFT 4
+#define PCIE_DEVICE_TYPE_MASK 0xF
+#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */
+
+#define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8)
+#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1)
+#define PCIE_GLB_STS_PHY_LINK_UP BIT(9)
+
+#define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C)
+#define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20)
+#define PCIE_INT_A_ASSERT_MASK BIT(9)
+#define PCIE_INT_B_ASSERT_MASK BIT(10)
+#define PCIE_INT_C_ASSERT_MASK BIT(11)
+#define PCIE_INT_D_ASSERT_MASK BIT(12)
+
+#define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50)
+#define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54)
+#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C)
+#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60)
+/*
+ * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
+ * allocate
+ */
+#define ARCACHE_DEFAULT_VALUE 0x3511
+#define AWCACHE_DEFAULT_VALUE 0x5311
+
+#define DOMAIN_OUTER_SHAREABLE 0x2
+#define AX_USER_DOMAIN_MASK 0x3
+#define AX_USER_DOMAIN_SHIFT 4
+
+#define to_armada8k_pcie(x) dev_get_drvdata((x)->dev)
+
+static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie)
+{
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+}
+
+static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ return ret;
+
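+		/* The number of available PHYs is passed as the PCIe PHY submode */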
+ ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE,
+ pcie->phy_count);
+ if (ret) {
+ phy_exit(pcie->phy[i]);
+ return ret;
+ }
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ phy_exit(pcie->phy[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i);
+ if (IS_ERR(pcie->phy[i])) {
+ if (PTR_ERR(pcie->phy[i]) != -ENODEV)
+ return PTR_ERR(pcie->phy[i]);
+
+ pcie->phy[i] = NULL;
+ continue;
+ }
+
+ pcie->phy_count++;
+ }
+
+ /* Old bindings miss the PHY handle, so just warn if there is no PHY */
+ if (!pcie->phy_count)
+ dev_warn(dev, "No available PHY\n");
+
+ ret = armada8k_pcie_enable_phys(pcie);
+ if (ret)
+ dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret);
+
+ return ret;
+}
+
+static int armada8k_pcie_link_up(struct dw_pcie *pci)
+{
+ u32 reg;
+ u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
+
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
+
+ if ((reg & mask) == mask)
+ return 1;
+
+ dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
+ return 0;
+}
+
+static int armada8k_pcie_start_link(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ /* Start LTSSM */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+ reg |= PCIE_APP_LTSSM_EN;
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+
+ return 0;
+}
+
+static int armada8k_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ u32 reg;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ if (!dw_pcie_link_up(pci)) {
+ /* Disable LTSSM state machine to enable configuration */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+ reg &= ~(PCIE_APP_LTSSM_EN);
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+ }
+
+ /* Set the device to root complex mode */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+ reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
+ reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+
+ /* Set the PCIe master AxCache attributes */
+ dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
+ dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);
+
+ /* Set the PCIe master AxDomain attributes */
+ reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG);
+ reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+ reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+ dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg);
+
+ reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG);
+ reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+ reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+ dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg);
+
+ /* Enable INT A-D interrupts */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG);
+ reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
+ PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);
+
+ return 0;
+}
+
+static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
+{
+ struct armada8k_pcie *pcie = arg;
+ struct dw_pcie *pci = pcie->pci;
+ u32 val;
+
+ /*
+ * Interrupts are directly handled by the device driver of the
+ * PCI device. However, they are also latched into the PCIe
+ * controller, so we simply discard them.
+ */
+ val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG);
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val);
+
+ return IRQ_HANDLED;
+}
+
+static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
+ .host_init = armada8k_pcie_host_init,
+};
+
+static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->ops = &armada8k_pcie_host_ops;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
+ IRQF_SHARED, "armada8k-pcie", pcie);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", pp->irq);
+ return ret;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = armada8k_pcie_link_up,
+ .start_link = armada8k_pcie_start_link,
+};
+
+static int armada8k_pcie_probe(struct platform_device *pdev)
+{
+ struct dw_pcie *pci;
+ struct armada8k_pcie *pcie;
+ struct device *dev = &pdev->dev;
+ struct resource *base;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ pcie->pci = pci;
+
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return ret;
+
+ pcie->clk_reg = devm_clk_get(dev, "reg");
+ if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
+ ret = -EPROBE_DEFER;
+ goto fail;
+ }
+ if (!IS_ERR(pcie->clk_reg)) {
+ ret = clk_prepare_enable(pcie->clk_reg);
+ if (ret)
+ goto fail_clkreg;
+ }
+
+ /* Get the dw-pcie unit configuration/control registers base. */
+ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
+ if (IS_ERR(pci->dbi_base)) {
+ ret = PTR_ERR(pci->dbi_base);
+ goto fail_clkreg;
+ }
+
+ ret = armada8k_pcie_setup_phys(pcie);
+ if (ret)
+ goto fail_clkreg;
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = armada8k_add_pcie_port(pcie, pdev);
+ if (ret)
+ goto disable_phy;
+
+ return 0;
+
+disable_phy:
+ armada8k_pcie_disable_phys(pcie);
+fail_clkreg:
+ clk_disable_unprepare(pcie->clk_reg);
+fail:
+ clk_disable_unprepare(pcie->clk);
+
+ return ret;
+}
+
+static const struct of_device_id armada8k_pcie_of_match[] = {
+ { .compatible = "marvell,armada8k-pcie", },
+ {},
+};
+
+static struct platform_driver armada8k_pcie_driver = {
+ .probe = armada8k_pcie_probe,
+ .driver = {
+ .name = "armada8k-pcie",
+ .of_match_table = armada8k_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(armada8k_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
new file mode 100644
index 0000000000..9b572a2b2c
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Axis ARTPEC-6 SoC
+ *
+ * Author: Niklas Cassel <niklas.cassel@axis.com>
+ *
+ * Based on work done by Phil Edworthy <phil@edworthys.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+#define to_artpec6_pcie(x) dev_get_drvdata((x)->dev)
+
+enum artpec_pcie_variants {
+ ARTPEC6,
+ ARTPEC7,
+};
+
+struct artpec6_pcie {
+ struct dw_pcie *pci;
+ struct regmap *regmap; /* DT axis,syscon-pcie */
+ void __iomem *phy_base; /* DT phy */
+ enum artpec_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
+};
+
+struct artpec_pcie_of_data {
+ enum artpec_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
+};
+
+static const struct of_device_id artpec6_pcie_of_match[];
+
+/* ARTPEC-6 specific registers */
+#define PCIECFG 0x18
+#define PCIECFG_DBG_OEN BIT(24)
+#define PCIECFG_CORE_RESET_REQ BIT(21)
+#define PCIECFG_LTSSM_ENABLE BIT(20)
+#define PCIECFG_DEVICE_TYPE_MASK GENMASK(19, 16)
+#define PCIECFG_CLKREQ_B BIT(11)
+#define PCIECFG_REFCLK_ENABLE BIT(10)
+#define PCIECFG_PLL_ENABLE BIT(9)
+#define PCIECFG_PCLK_ENABLE BIT(8)
+#define PCIECFG_RISRCREN BIT(4)
+#define PCIECFG_MODE_TX_DRV_EN BIT(3)
+#define PCIECFG_CISRREN BIT(2)
+#define PCIECFG_MACRO_ENABLE BIT(0)
+/* ARTPEC-7 specific fields */
+#define PCIECFG_REFCLKSEL BIT(23)
+#define PCIECFG_NOC_RESET BIT(3)
+
+#define PCIESTAT 0x1c
+/* ARTPEC-7 specific fields */
+#define PCIESTAT_EXTREFCLK BIT(3)
+
+#define NOCCFG 0x40
+#define NOCCFG_ENABLE_CLK_PCIE BIT(4)
+#define NOCCFG_POWER_PCIE_IDLEACK BIT(3)
+#define NOCCFG_POWER_PCIE_IDLE BIT(2)
+#define NOCCFG_POWER_PCIE_IDLEREQ BIT(1)
+
+#define PHY_STATUS 0x118
+#define PHY_COSPLLLOCK BIT(0)
+
+#define PHY_TX_ASIC_OUT 0x4040
+#define PHY_TX_ASIC_OUT_TX_ACK BIT(0)
+
+#define PHY_RX_ASIC_OUT 0x405c
+#define PHY_RX_ASIC_OUT_ACK BIT(0)
+
+static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
+{
+ u32 val;
+
+ regmap_read(artpec6_pcie->regmap, offset, &val);
+ return val;
+}
+
+static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val)
+{
+ regmap_write(artpec6_pcie->regmap, offset, val);
+}
+
+static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+{
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct dw_pcie_ep *ep = &pci->ep;
+
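+	/* Subtract the outbound space base: cfg0_base in RC mode, phys_base in EP mode */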
+ switch (artpec6_pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ return pci_addr - pp->cfg0_base;
+ case DW_PCIE_EP_TYPE:
+ return pci_addr - ep->phys_base;
+ default:
+ dev_err(pci->dev, "UNKNOWN device type\n");
+ }
+ return pci_addr;
+}
+
+static int artpec6_pcie_establish_link(struct dw_pcie *pci)
+{
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ u32 val;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val |= PCIECFG_LTSSM_ENABLE;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+
+ return 0;
+}
+
+static void artpec6_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ u32 val;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val &= ~PCIECFG_LTSSM_ENABLE;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
+ .start_link = artpec6_pcie_establish_link,
+ .stop_link = artpec6_pcie_stop_link,
+};
+
+static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie)
+{
+ struct dw_pcie *pci = artpec6_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ unsigned int retries;
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ retries--;
+ } while (retries &&
+ (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
+ if (!retries)
+ dev_err(dev, "PCIe clock manager did not leave idle state\n");
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ val = readl(artpec6_pcie->phy_base + PHY_STATUS);
+ retries--;
+ } while (retries && !(val & PHY_COSPLLLOCK));
+ if (!retries)
+ dev_err(dev, "PHY PLL did not lock\n");
+}
+
+static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie)
+{
+ struct dw_pcie *pci = artpec6_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ u16 phy_status_tx, phy_status_rx;
+ unsigned int retries;
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ retries--;
+ } while (retries &&
+ (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
+ if (!retries)
+ dev_err(dev, "PCIe clock manager did not leave idle state\n");
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT);
+ phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT);
+ retries--;
+ } while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) ||
+ (phy_status_rx & PHY_RX_ASIC_OUT_ACK)));
+ if (!retries)
+ dev_err(dev, "PHY did not enter Pn state\n");
+}
+
+static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie)
+{
+ switch (artpec6_pcie->variant) {
+ case ARTPEC6:
+ artpec6_pcie_wait_for_phy_a6(artpec6_pcie);
+ break;
+ case ARTPEC7:
+ artpec6_pcie_wait_for_phy_a7(artpec6_pcie);
+ break;
+ }
+}
+
+static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie)
+{
+ u32 val;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
+ PCIECFG_MODE_TX_DRV_EN |
+ PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */
+ PCIECFG_MACRO_ENABLE;
+ val |= PCIECFG_REFCLK_ENABLE;
+ val &= ~PCIECFG_DBG_OEN;
+ val &= ~PCIECFG_CLKREQ_B;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+ usleep_range(5000, 6000);
+
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ val |= NOCCFG_ENABLE_CLK_PCIE;
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+ usleep_range(20, 30);
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+ usleep_range(6000, 7000);
+
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+}
+
+static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie)
+{
+ struct dw_pcie *pci = artpec6_pcie->pci;
+ u32 val;
+ bool extrefclk;
+
+ /* Check if external reference clock is connected */
+ val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT);
+ extrefclk = !!(val & PCIESTAT_EXTREFCLK);
+ dev_dbg(pci->dev, "Using reference clock: %s\n",
+ extrefclk ? "external" : "internal");
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
+ PCIECFG_PCLK_ENABLE;
+ if (extrefclk)
+ val |= PCIECFG_REFCLKSEL;
+ else
+ val &= ~PCIECFG_REFCLKSEL;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+ usleep_range(10, 20);
+
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ val |= NOCCFG_ENABLE_CLK_PCIE;
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+ usleep_range(20, 30);
+
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+}
+
+static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
+{
+ switch (artpec6_pcie->variant) {
+ case ARTPEC6:
+ artpec6_pcie_init_phy_a6(artpec6_pcie);
+ break;
+ case ARTPEC7:
+ artpec6_pcie_init_phy_a7(artpec6_pcie);
+ break;
+ }
+}
+
+static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
+{
+ u32 val;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ switch (artpec6_pcie->variant) {
+ case ARTPEC6:
+ val |= PCIECFG_CORE_RESET_REQ;
+ break;
+ case ARTPEC7:
+ val &= ~PCIECFG_NOC_RESET;
+ break;
+ }
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+}
+
+static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
+{
+ u32 val;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ switch (artpec6_pcie->variant) {
+ case ARTPEC6:
+ val &= ~PCIECFG_CORE_RESET_REQ;
+ break;
+ case ARTPEC7:
+ val |= PCIECFG_NOC_RESET;
+ break;
+ }
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+ usleep_range(100, 200);
+}
+
+static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+
+ if (artpec6_pcie->variant == ARTPEC7) {
+ pci->n_fts[0] = 180;
+ pci->n_fts[1] = 180;
+ }
+ artpec6_pcie_assert_core_reset(artpec6_pcie);
+ artpec6_pcie_init_phy(artpec6_pcie);
+ artpec6_pcie_deassert_core_reset(artpec6_pcie);
+ artpec6_pcie_wait_for_phy(artpec6_pcie);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
+ .host_init = artpec6_pcie_host_init,
+};
+
+static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ enum pci_barno bar;
+
+ artpec6_pcie_assert_core_reset(artpec6_pcie);
+ artpec6_pcie_init_phy(artpec6_pcie);
+ artpec6_pcie_deassert_core_reset(artpec6_pcie);
+ artpec6_pcie_wait_for_phy(artpec6_pcie);
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = artpec6_pcie_ep_init,
+ .raise_irq = artpec6_pcie_raise_irq,
+};
+
+static int artpec6_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct artpec6_pcie *artpec6_pcie;
+ int ret;
+ const struct artpec_pcie_of_data *data;
+ enum artpec_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
+ u32 val;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ variant = (enum artpec_pcie_variants)data->variant;
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
+ if (!artpec6_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ artpec6_pcie->pci = pci;
+ artpec6_pcie->variant = variant;
+ artpec6_pcie->mode = mode;
+
+ artpec6_pcie->phy_base =
+ devm_platform_ioremap_resource_byname(pdev, "phy");
+ if (IS_ERR(artpec6_pcie->phy_base))
+ return PTR_ERR(artpec6_pcie->phy_base);
+
+ artpec6_pcie->regmap =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "axis,syscon-pcie");
+ if (IS_ERR(artpec6_pcie->regmap))
+ return PTR_ERR(artpec6_pcie->regmap);
+
+ platform_set_drvdata(pdev, artpec6_pcie);
+
+ switch (artpec6_pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
+ return -ENODEV;
+
+ pci->pp.ops = &artpec6_pcie_host_ops;
+
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0)
+ return ret;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
+ return -ENODEV;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val &= ~PCIECFG_DEVICE_TYPE_MASK;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+
+ pci->ep.ops = &pcie_ep_ops;
+
+ return dw_pcie_ep_init(&pci->ep);
+ default:
+ dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
+ }
+
+ return 0;
+}
+
+static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = {
+ .variant = ARTPEC6,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = {
+ .variant = ARTPEC6,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = {
+ .variant = ARTPEC7,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = {
+ .variant = ARTPEC7,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id artpec6_pcie_of_match[] = {
+ {
+ .compatible = "axis,artpec6-pcie",
+ .data = &artpec6_pcie_rc_of_data,
+ },
+ {
+ .compatible = "axis,artpec6-pcie-ep",
+ .data = &artpec6_pcie_ep_of_data,
+ },
+ {
+ .compatible = "axis,artpec7-pcie",
+ .data = &artpec7_pcie_rc_of_data,
+ },
+ {
+ .compatible = "axis,artpec7-pcie-ep",
+ .data = &artpec7_pcie_ep_of_data,
+ },
+ {},
+};
+
+static struct platform_driver artpec6_pcie_driver = {
+ .probe = artpec6_pcie_probe,
+ .driver = {
+ .name = "artpec6-pcie",
+ .of_match_table = artpec6_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(artpec6_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
new file mode 100644
index 0000000000..17e696797f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-bt1.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Vadim Vlasov <Vadim.Vlasov@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 PCIe controller driver
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* Baikal-T1 System CCU control registers */
+#define BT1_CCU_PCIE_CLKC 0x140
+#define BT1_CCU_PCIE_REQ_PCS_CLK BIT(16)
+#define BT1_CCU_PCIE_REQ_MAC_CLK BIT(17)
+#define BT1_CCU_PCIE_REQ_PIPE_CLK BIT(18)
+
+#define BT1_CCU_PCIE_RSTC 0x144
+#define BT1_CCU_PCIE_REQ_LINK_RST BIT(13)
+#define BT1_CCU_PCIE_REQ_SMLH_RST BIT(14)
+#define BT1_CCU_PCIE_REQ_PHY_RST BIT(16)
+#define BT1_CCU_PCIE_REQ_CORE_RST BIT(24)
+#define BT1_CCU_PCIE_REQ_STICKY_RST BIT(26)
+#define BT1_CCU_PCIE_REQ_NSTICKY_RST BIT(27)
+
+#define BT1_CCU_PCIE_PMSC 0x148
+#define BT1_CCU_PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
+#define BT1_CCU_PCIE_LTSSM_DET_QUIET 0x00
+#define BT1_CCU_PCIE_LTSSM_DET_ACT 0x01
+#define BT1_CCU_PCIE_LTSSM_POLL_ACT 0x02
+#define BT1_CCU_PCIE_LTSSM_POLL_COMP 0x03
+#define BT1_CCU_PCIE_LTSSM_POLL_CONF 0x04
+#define BT1_CCU_PCIE_LTSSM_PRE_DET_QUIET 0x05
+#define BT1_CCU_PCIE_LTSSM_DET_WAIT 0x06
+#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_START 0x07
+#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_ACEPT 0x08
+#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_WAIT 0x09
+#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_ACEPT 0x0a
+#define BT1_CCU_PCIE_LTSSM_CFG_COMPLETE 0x0b
+#define BT1_CCU_PCIE_LTSSM_CFG_IDLE 0x0c
+#define BT1_CCU_PCIE_LTSSM_RCVR_LOCK 0x0d
+#define BT1_CCU_PCIE_LTSSM_RCVR_SPEED 0x0e
+#define BT1_CCU_PCIE_LTSSM_RCVR_RCVRCFG 0x0f
+#define BT1_CCU_PCIE_LTSSM_RCVR_IDLE 0x10
+#define BT1_CCU_PCIE_LTSSM_L0 0x11
+#define BT1_CCU_PCIE_LTSSM_L0S 0x12
+#define BT1_CCU_PCIE_LTSSM_L123_SEND_IDLE 0x13
+#define BT1_CCU_PCIE_LTSSM_L1_IDLE 0x14
+#define BT1_CCU_PCIE_LTSSM_L2_IDLE 0x15
+#define BT1_CCU_PCIE_LTSSM_L2_WAKE 0x16
+#define BT1_CCU_PCIE_LTSSM_DIS_ENTRY 0x17
+#define BT1_CCU_PCIE_LTSSM_DIS_IDLE 0x18
+#define BT1_CCU_PCIE_LTSSM_DISABLE 0x19
+#define BT1_CCU_PCIE_LTSSM_LPBK_ENTRY 0x1a
+#define BT1_CCU_PCIE_LTSSM_LPBK_ACTIVE 0x1b
+#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT 0x1c
+#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT_TOUT 0x1d
+#define BT1_CCU_PCIE_LTSSM_HOT_RST_ENTRY 0x1e
+#define BT1_CCU_PCIE_LTSSM_HOT_RST 0x1f
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ0 0x20
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ1 0x21
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ2 0x22
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ3 0x23
+#define BT1_CCU_PCIE_SMLH_LINKUP BIT(6)
+#define BT1_CCU_PCIE_RDLH_LINKUP BIT(7)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L0S BIT(8)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L1 BIT(9)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L2 BIT(10)
+#define BT1_CCU_PCIE_L1_PENDING BIT(12)
+#define BT1_CCU_PCIE_REQ_EXIT_L1 BIT(14)
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ BIT(15)
+#define BT1_CCU_PCIE_PM_DSTAT_MASK GENMASK(18, 16)
+#define BT1_CCU_PCIE_PM_PME_EN BIT(20)
+#define BT1_CCU_PCIE_PM_PME_STATUS BIT(21)
+#define BT1_CCU_PCIE_AUX_PM_EN BIT(22)
+#define BT1_CCU_PCIE_AUX_PWR_DET BIT(23)
+#define BT1_CCU_PCIE_WAKE_DET BIT(24)
+#define BT1_CCU_PCIE_TURNOFF_REQ BIT(30)
+#define BT1_CCU_PCIE_TURNOFF_ACK BIT(31)
+
+#define BT1_CCU_PCIE_GENC 0x14c
+#define BT1_CCU_PCIE_LTSSM_EN BIT(1)
+#define BT1_CCU_PCIE_DBI2_MODE BIT(2)
+#define BT1_CCU_PCIE_MGMT_EN BIT(3)
+#define BT1_CCU_PCIE_RXLANE_FLIP_EN BIT(16)
+#define BT1_CCU_PCIE_TXLANE_FLIP_EN BIT(17)
+#define BT1_CCU_PCIE_SLV_XFER_PEND BIT(24)
+#define BT1_CCU_PCIE_RCV_XFER_PEND BIT(25)
+#define BT1_CCU_PCIE_DBI_XFER_PEND BIT(26)
+#define BT1_CCU_PCIE_DMA_XFER_PEND BIT(27)
+
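+/* The link is considered up once the LTSSM has reached any state from L0 up to L2 wake */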
+#define BT1_CCU_PCIE_LTSSM_LINKUP(_pmsc) \
+({ \
+ int __state = FIELD_GET(BT1_CCU_PCIE_LTSSM_STATE_MASK, _pmsc); \
+ __state >= BT1_CCU_PCIE_LTSSM_L0 && __state <= BT1_CCU_PCIE_LTSSM_L2_WAKE; \
+})
+
+/* Baikal-T1 PCIe specific control registers */
+#define BT1_PCIE_AXI2MGM_LANENUM 0xd04
+#define BT1_PCIE_AXI2MGM_LANESEL_MASK GENMASK(3, 0)
+
+#define BT1_PCIE_AXI2MGM_ADDRCTL 0xd08
+#define BT1_PCIE_AXI2MGM_PHYREG_ADDR_MASK GENMASK(20, 0)
+#define BT1_PCIE_AXI2MGM_READ_FLAG BIT(29)
+#define BT1_PCIE_AXI2MGM_DONE BIT(30)
+#define BT1_PCIE_AXI2MGM_BUSY BIT(31)
+
+#define BT1_PCIE_AXI2MGM_WRITEDATA 0xd0c
+#define BT1_PCIE_AXI2MGM_WDATA GENMASK(15, 0)
+
+#define BT1_PCIE_AXI2MGM_READDATA 0xd10
+#define BT1_PCIE_AXI2MGM_RDATA GENMASK(15, 0)
+
+/* Generic Baikal-T1 PCIe interface resources */
+#define BT1_PCIE_NUM_APP_CLKS ARRAY_SIZE(bt1_pcie_app_clks)
+#define BT1_PCIE_NUM_CORE_CLKS ARRAY_SIZE(bt1_pcie_core_clks)
+#define BT1_PCIE_NUM_APP_RSTS ARRAY_SIZE(bt1_pcie_app_rsts)
+#define BT1_PCIE_NUM_CORE_RSTS ARRAY_SIZE(bt1_pcie_core_rsts)
+
+/* PCIe bus setup delays and timeouts */
+#define BT1_PCIE_RST_DELAY_MS 100
+#define BT1_PCIE_RUN_DELAY_US 100
+#define BT1_PCIE_REQ_DELAY_US 1
+#define BT1_PCIE_REQ_TIMEOUT_US 1000
+#define BT1_PCIE_LNK_DELAY_US 1000
+#define BT1_PCIE_LNK_TIMEOUT_US 1000000
+
+static const enum dw_pcie_app_clk bt1_pcie_app_clks[] = {
+ DW_PCIE_DBI_CLK, DW_PCIE_MSTR_CLK, DW_PCIE_SLV_CLK,
+};
+
+static const enum dw_pcie_core_clk bt1_pcie_core_clks[] = {
+ DW_PCIE_REF_CLK,
+};
+
+static const enum dw_pcie_app_rst bt1_pcie_app_rsts[] = {
+ DW_PCIE_MSTR_RST, DW_PCIE_SLV_RST,
+};
+
+static const enum dw_pcie_core_rst bt1_pcie_core_rsts[] = {
+ DW_PCIE_NON_STICKY_RST, DW_PCIE_STICKY_RST, DW_PCIE_CORE_RST,
+ DW_PCIE_PIPE_RST, DW_PCIE_PHY_RST, DW_PCIE_HOT_RST, DW_PCIE_PWR_RST,
+};
+
+struct bt1_pcie {
+ struct dw_pcie dw;
+ struct platform_device *pdev;
+ struct regmap *sys_regs;
+};
+#define to_bt1_pcie(_dw) container_of(_dw, struct bt1_pcie, dw)
+
+/*
+ * Baikal-T1 MMIO space must be accessed with dword-aligned instructions.
+ * Note the methods below are optimized so that dword operations, being the
+ * most frequently used ones, are performed with minimum overhead.
+ */
+static int bt1_pcie_read_mmio(void __iomem *addr, int size, u32 *val)
+{
+ unsigned int ofs = (uintptr_t)addr & 0x3;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return -EINVAL;
+
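+	/* Read the containing dword and shift the addressed bytes down to bit 0 */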
+ *val = readl(addr - ofs) >> ofs * BITS_PER_BYTE;
+ if (size == 4) {
+ return 0;
+ } else if (size == 2) {
+ *val &= 0xffff;
+ return 0;
+ } else if (size == 1) {
+ *val &= 0xff;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int bt1_pcie_write_mmio(void __iomem *addr, int size, u32 val)
+{
+ unsigned int ofs = (uintptr_t)addr & 0x3;
+ u32 tmp, mask;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return -EINVAL;
+
+ if (size == 4) {
+ writel(val, addr);
+ return 0;
+ } else if (size == 2 || size == 1) {
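+		/* Emulate sub-dword stores with a read-modify-write of the containing dword */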
+ mask = GENMASK(size * BITS_PER_BYTE - 1, 0);
+ tmp = readl(addr - ofs) & ~(mask << ofs * BITS_PER_BYTE);
+ tmp |= (val & mask) << ofs * BITS_PER_BYTE;
+ writel(tmp, addr - ofs);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static u32 bt1_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size)
+{
+ int ret;
+ u32 val;
+
+ ret = bt1_pcie_read_mmio(base + reg, size, &val);
+ if (ret) {
+ dev_err(pci->dev, "Read DBI address failed\n");
+ return ~0U;
+ }
+
+ return val;
+}
+
+static void bt1_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ int ret;
+
+ ret = bt1_pcie_write_mmio(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI address failed\n");
+}
+
+static void bt1_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ int ret;
+
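+	/* The shadow (DBI2) registers are reached by toggling the DBI2-mode flag in the system CCU */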
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE, BT1_CCU_PCIE_DBI2_MODE);
+
+ ret = bt1_pcie_write_mmio(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI2 address failed\n");
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE, 0);
+}
+
+static int bt1_pcie_start_link(struct dw_pcie *pci)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ u32 val;
+ int ret;
+
+ /*
+ * Enable LTSSM and make sure it was able to establish both PHY and
+ * data links. This procedure shall work fine to reach 2.5 GT/s speed.
+ */
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, BT1_CCU_PCIE_LTSSM_EN);
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ (val & BT1_CCU_PCIE_SMLH_LINKUP),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to set PHY link up\n");
+ return ret;
+ }
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ (val & BT1_CCU_PCIE_RDLH_LINKUP),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to set data link up\n");
+ return ret;
+ }
+
+ /*
+ * Activate direct speed change after the link is established in an
+ * attempt to reach a higher bus performance (up to Gen.3 - 8.0 GT/s).
+ * This is required at least to get 8.0 GT/s speed.
+ */
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ BT1_CCU_PCIE_LTSSM_LINKUP(val),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret)
+ dev_err(pci->dev, "LTSSM failed to get into L0 state\n");
+
+ return ret;
+}
+
+static void bt1_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, 0);
+}
+
+static const struct dw_pcie_ops bt1_pcie_ops = {
+ .read_dbi = bt1_pcie_read_dbi,
+ .write_dbi = bt1_pcie_write_dbi,
+ .write_dbi2 = bt1_pcie_write_dbi2,
+ .start_link = bt1_pcie_start_link,
+ .stop_link = bt1_pcie_stop_link,
+};
+
+static struct pci_ops bt1_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static int bt1_pcie_get_resources(struct bt1_pcie *btpci)
+{
+ struct device *dev = btpci->dw.dev;
+ int i;
+
+	/* DBI accesses are supposed to be performed with dword-aligned I/O only */
+ btpci->dw.pp.bridge->ops = &bt1_pci_ops;
+
+	/* These CSRs are MMIO-backed, so we don't check the regmap accessors' status */
+ btpci->sys_regs =
+ syscon_regmap_lookup_by_phandle(dev->of_node, "baikal,bt1-syscon");
+ if (IS_ERR(btpci->sys_regs))
+ return dev_err_probe(dev, PTR_ERR(btpci->sys_regs),
+ "Failed to get syscon\n");
+
+ /* Make sure all the required resources have been specified */
+ for (i = 0; i < BT1_PCIE_NUM_APP_CLKS; i++) {
+ if (!btpci->dw.app_clks[bt1_pcie_app_clks[i]].clk) {
+ dev_err(dev, "App clocks set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_CORE_CLKS; i++) {
+ if (!btpci->dw.core_clks[bt1_pcie_core_clks[i]].clk) {
+ dev_err(dev, "Core clocks set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_APP_RSTS; i++) {
+ if (!btpci->dw.app_rsts[bt1_pcie_app_rsts[i]].rstc) {
+ dev_err(dev, "App resets set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_CORE_RSTS; i++) {
+ if (!btpci->dw.core_rsts[bt1_pcie_core_rsts[i]].rstc) {
+ dev_err(dev, "Core resets set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
+static void bt1_pcie_full_stop_bus(struct bt1_pcie *btpci, bool init)
+{
+ struct device *dev = btpci->dw.dev;
+ struct dw_pcie *pci = &btpci->dw;
+ int ret;
+
+ /* Disable LTSSM for sure */
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, 0);
+
+ /*
+ * Application reset controls are trigger-based so assert the core
+ * resets only.
+ */
+ ret = reset_control_bulk_assert(DW_PCIE_NUM_CORE_RSTS, pci->core_rsts);
+ if (ret)
+ dev_err(dev, "Failed to assert core resets\n");
+
+ /*
+ * Clocks are disabled by default at least in accordance with the clk
+ * enable counter value on init stage.
+ */
+ if (!init) {
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ }
+
+ /* The peripheral devices are unavailable anyway so reset them too */
+ gpiod_set_value_cansleep(pci->pe_rst, 1);
+
+ /* Make sure all the resets are settled */
+ msleep(BT1_PCIE_RST_DELAY_MS);
+}
+
+/*
+ * Implements the cold reset procedure in accordance with the reference manual
+ * and available PM signals.
+ */
+static int bt1_pcie_cold_start_bus(struct bt1_pcie *btpci)
+{
+ struct device *dev = btpci->dw.dev;
+ struct dw_pcie *pci = &btpci->dw;
+ u32 val;
+ int ret;
+
+ /* First get out of the Power/Hot reset state */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+ if (ret) {
+		dev_err(dev, "Failed to deassert power reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert hot reset\n");
+ goto err_assert_pwr_rst;
+ }
+
+ /* Wait for the PM-core to stop requesting the PHY reset */
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+ !(val & BT1_CCU_PCIE_REQ_PHY_RST),
+ BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Timed out waiting for PM to stop PHY resetting\n");
+ goto err_assert_hot_rst;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PHY reset\n");
+ goto err_assert_hot_rst;
+ }
+
+	/* Clocks can now be enabled, but the ref one is crucial at this stage */
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ if (ret) {
+ dev_err(dev, "Failed to enable app clocks\n");
+ goto err_assert_phy_rst;
+ }
+
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+ if (ret) {
+ dev_err(dev, "Failed to enable ref clocks\n");
+ goto err_disable_app_clk;
+ }
+
+ /* Wait for the PM to stop requesting the controller core reset */
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+ !(val & BT1_CCU_PCIE_REQ_CORE_RST),
+ BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Timed out waiting for PM to stop core resetting\n");
+ goto err_disable_core_clk;
+ }
+
+	/* PCS-PIPE interface and controller core can now be activated */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PIPE reset\n");
+ goto err_disable_core_clk;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core reset\n");
+ goto err_assert_pipe_rst;
+ }
+
+ /* It's recommended to reset the core and application logic together */
+ ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
+ if (ret) {
+ dev_err(dev, "Failed to reset app domain\n");
+ goto err_assert_core_rst;
+ }
+
+	/* Sticky/Non-sticky CSR flags can now be taken out of reset too */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert sticky reset\n");
+ goto err_assert_core_rst;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_NON_STICKY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert non-sticky reset\n");
+ goto err_assert_sticky_rst;
+ }
+
+ /* Activate the PCIe bus peripheral devices */
+ gpiod_set_value_cansleep(pci->pe_rst, 0);
+
+ /* Make sure the state is settled (LTSSM is still disabled though) */
+ usleep_range(BT1_PCIE_RUN_DELAY_US, BT1_PCIE_RUN_DELAY_US + 100);
+
+ return 0;
+
+err_assert_sticky_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+
+err_assert_core_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+
+err_assert_pipe_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+
+err_disable_core_clk:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+err_disable_app_clk:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+
+err_assert_phy_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+
+err_assert_hot_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+
+err_assert_pwr_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+
+ return ret;
+}
+
+static int bt1_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ int ret;
+
+ ret = bt1_pcie_get_resources(btpci);
+ if (ret)
+ return ret;
+
+ bt1_pcie_full_stop_bus(btpci, true);
+
+ return bt1_pcie_cold_start_bus(btpci);
+}
+
+static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+ bt1_pcie_full_stop_bus(btpci, false);
+}
+
+static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
+ .host_init = bt1_pcie_host_init,
+ .host_deinit = bt1_pcie_host_deinit,
+};
+
+static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci;
+
+ btpci = devm_kzalloc(&pdev->dev, sizeof(*btpci), GFP_KERNEL);
+ if (!btpci)
+ return ERR_PTR(-ENOMEM);
+
+ btpci->pdev = pdev;
+
+ platform_set_drvdata(pdev, btpci);
+
+ return btpci;
+}
+
+static int bt1_pcie_add_port(struct bt1_pcie *btpci)
+{
+ struct device *dev = &btpci->pdev->dev;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ btpci->dw.version = DW_PCIE_VER_460A;
+ btpci->dw.dev = dev;
+ btpci->dw.ops = &bt1_pcie_ops;
+
+ btpci->dw.pp.num_vectors = MAX_MSI_IRQS;
+ btpci->dw.pp.ops = &bt1_pcie_host_ops;
+
+ dw_pcie_cap_set(&btpci->dw, REQ_RES);
+
+ ret = dw_pcie_host_init(&btpci->dw.pp);
+
+ return dev_err_probe(dev, ret, "Failed to initialize DWC PCIe host\n");
+}
+
+static void bt1_pcie_del_port(struct bt1_pcie *btpci)
+{
+ dw_pcie_host_deinit(&btpci->dw.pp);
+}
+
+static int bt1_pcie_probe(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci;
+
+ btpci = bt1_pcie_create_data(pdev);
+ if (IS_ERR(btpci))
+ return PTR_ERR(btpci);
+
+ return bt1_pcie_add_port(btpci);
+}
+
+static void bt1_pcie_remove(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci = platform_get_drvdata(pdev);
+
+ bt1_pcie_del_port(btpci);
+}
+
+static const struct of_device_id bt1_pcie_of_match[] = {
+ { .compatible = "baikal,bt1-pcie" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);
+
+static struct platform_driver bt1_pcie_driver = {
+ .probe = bt1_pcie_probe,
+ .remove_new = bt1_pcie_remove,
+ .driver = {
+ .name = "bt1-pcie",
+ .of_match_table = bt1_pcie_of_match,
+ },
+};
+module_platform_driver(bt1_pcie_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 PCIe driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
new file mode 100644
index 0000000000..8d79dd0e1d
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe Endpoint controller driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_init_notify(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
+
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ struct dw_pcie_ep_func *ep_func;
+
+ list_for_each_entry(ep_func, &ep->func_list, list) {
+ if (ep_func->func_no == func_no)
+ return ep_func;
+ }
+
+ return NULL;
+}
+
+static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no)
+{
+ unsigned int func_offset = 0;
+
+ if (ep->ops->func_conf_select)
+ func_offset = ep->ops->func_conf_select(ep, func_no);
+
+ return func_offset;
+}
+
+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
+ enum pci_barno bar, int flags)
+{
+ u32 reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep *ep = &pci->ep;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi2(pci, reg, 0x0);
+ dw_pcie_writel_dbi(pci, reg, 0x0);
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
+ dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+ }
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+ u8 func_no, funcs;
+
+ funcs = pci->ep.epc->max_functions;
+
+ for (func_no = 0; func_no < funcs; func_no++)
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
+
+static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
+ u8 cap_ptr, u8 cap)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
+ if (!cap_ptr)
+ return 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
+ cap_id = (reg & 0x00ff);
+
+ if (cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
+ next_cap_ptr = (reg & 0xff00) >> 8;
+ return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+}
+
+static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+ u8 next_cap_ptr;
+ u16 reg;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+ return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+}
+
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_header *hdr)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,
+ hdr->subclass_code | hdr->baseclass_code << 8);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,
+ hdr->cache_line_size);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,
+ hdr->subsys_vendor_id);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,
+ hdr->interrupt_pin);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ dma_addr_t cpu_addr, enum pci_barno bar)
+{
+ int ret;
+ u32 free_win;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
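+	/* Reuse the inbound iATU window already assigned to this BAR, if any */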
+ if (!ep->bar_to_atu[bar])
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ else
+ free_win = ep->bar_to_atu[bar];
+
+ if (free_win >= pci->num_ib_windows) {
+ dev_err(pci->dev, "No free inbound window\n");
+ return -EINVAL;
+ }
+
+ ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
+ cpu_addr, bar);
+ if (ret < 0) {
+ dev_err(pci->dev, "Failed to program IB window\n");
+ return ret;
+ }
+
+ ep->bar_to_atu[bar] = free_win;
+ set_bit(free_win, ep->ib_window_map);
+
+ return 0;
+}
+
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
+ phys_addr_t phys_addr,
+ u64 pci_addr, size_t size)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 free_win;
+ int ret;
+
+ free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
+ if (free_win >= pci->num_ob_windows) {
+ dev_err(pci->dev, "No free outbound window\n");
+ return -EINVAL;
+ }
+
+ ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
+ if (ret)
+ return ret;
+
+ set_bit(free_win, ep->ob_window_map);
+ ep->outbound_addr[free_win] = phys_addr;
+
+ return 0;
+}
+
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ u32 atu_index = ep->bar_to_atu[bar];
+
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
+
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
+ clear_bit(atu_index, ep->ib_window_map);
+ ep->epf_bar[bar] = NULL;
+ ep->bar_to_atu[bar] = 0;
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ unsigned int func_offset = 0;
+ int ret, type;
+ u32 reg;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
+
+ if (!(flags & PCI_BASE_ADDRESS_SPACE))
+ type = PCIE_ATU_TYPE_MEM;
+ else
+ type = PCIE_ATU_TYPE_IO;
+
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
+ if (ret)
+ return ret;
+
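+	/* If the BAR registers were already programmed, only the iATU mapping above needed updating */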
+ if (ep->epf_bar[bar])
+ return 0;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
+ dw_pcie_writel_dbi(pci, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
+ dw_pcie_writel_dbi(pci, reg + 4, 0);
+ }
+
+ ep->epf_bar[bar] = epf_bar;
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+ u32 *atu_index)
+{
+ u32 index;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ for (index = 0; index < pci->num_ob_windows; index++) {
+ if (ep->outbound_addr[index] != addr)
+ continue;
+ *atu_index = index;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr)
+{
+ int ret;
+ u32 atu_index;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ ret = dw_pcie_find_index(ep, addr, &atu_index);
+ if (ret < 0)
+ return;
+
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
+ clear_bit(atu_index, ep->ob_window_map);
+}
+
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size)
+{
+ int ret;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
+ if (ret) {
+ dev_err(pci->dev, "Failed to enable address\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ if (!(val & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+
+ return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ val &= ~PCI_MSI_FLAGS_QMASK;
+ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ if (!(val & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val &= PCI_MSIX_FLAGS_QSIZE;
+
+ return val;
+}
+
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ val &= ~PCI_MSIX_FLAGS_QSIZE;
+ val |= interrupts;
+ dw_pcie_writew_dbi(pci, reg, val);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
+ val = offset | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
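+	/* Place the PBA right after the MSI-X table in the same BAR */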
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;
+ val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->raise_irq)
+ return -EINVAL;
+
+ return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
+}
+
+static void dw_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_stop_link(pci);
+}
+
+static int dw_pcie_ep_start(struct pci_epc *epc)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ return dw_pcie_start_link(pci);
+}
+
+static const struct pci_epc_features*
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->get_features)
+ return NULL;
+
+ return ep->ops->get_features(ep);
+}
+
+static const struct pci_epc_ops epc_ops = {
+ .write_header = dw_pcie_ep_write_header,
+ .set_bar = dw_pcie_ep_set_bar,
+ .clear_bar = dw_pcie_ep_clear_bar,
+ .map_addr = dw_pcie_ep_map_addr,
+ .unmap_addr = dw_pcie_ep_unmap_addr,
+ .set_msi = dw_pcie_ep_set_msi,
+ .get_msi = dw_pcie_ep_get_msi,
+ .set_msix = dw_pcie_ep_set_msix,
+ .get_msix = dw_pcie_ep_get_msix,
+ .raise_irq = dw_pcie_ep_raise_irq,
+ .start = dw_pcie_ep_start,
+ .stop = dw_pcie_ep_stop,
+ .get_features = dw_pcie_ep_get_features,
+};
+
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ dev_err(dev, "EP cannot trigger legacy IRQs\n");
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq);
+
+int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u8 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ struct pci_epc *epc = ep->epc;
+ unsigned int aligned_offset;
+ unsigned int func_offset = 0;
+ u16 msg_ctrl, msg_data;
+ u32 msg_addr_lower, msg_addr_upper, reg;
+ u64 msg_addr;
+ bool has_upper;
+ int ret;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
+ msg_ctrl = dw_pcie_readw_dbi(pci, reg);
+ has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;
+ msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
+ if (has_upper) {
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;
+ msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;
+ msg_data = dw_pcie_readw_dbi(pci, reg);
+ } else {
+ msg_addr_upper = 0;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;
+ msg_data = dw_pcie_readw_dbi(pci, reg);
+ }
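+	/* Map the MSI target page through the reserved window and write the message to raise the IRQ */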
+ aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
+ msg_addr = ((u64)msg_addr_upper) << 32 |
+ (msg_addr_lower & ~aligned_offset);
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ epc->mem->window.page_size);
+ if (ret)
+ return ret;
+
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
+
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
+
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ u32 msg_data;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
+ (interrupt_num - 1);
+
+ dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
+
+ return 0;
+}
+
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct pci_epc *epc = ep->epc;
+ unsigned int func_offset = 0;
+ u32 reg, msg_data, vec_ctrl;
+ unsigned int aligned_offset;
+ u32 tbl_offset;
+ u64 msg_addr;
+ int ret;
+ u8 bir;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
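+	/* Locate the MSI-X table via the BIR/offset advertised in the MSI-X capability */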
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
+ tbl_offset = dw_pcie_readl_dbi(pci, reg);
+ bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+
+ msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
+ msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+ msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+ vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;
+
+ if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
+ dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
+ return -EPERM;
+ }
+
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+ msg_addr &= ~aligned_offset;
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ epc->mem->window.page_size);
+ if (ret)
+ return ret;
+
+ writel(msg_data, ep->msi_mem + aligned_offset);
+
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+ return 0;
+}
+
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+
+ dw_pcie_edma_remove(pci);
+
+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ epc->mem->window.page_size);
+
+ pci_epc_mem_exit(epc);
+}
+
+static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+{
+ u32 header;
+ int pos = PCI_CFG_SPACE_SIZE;
+
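+	/* Walk the extended capability list, which starts right after the standard config space */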
+ while (pos) {
+ header = dw_pcie_readl_dbi(pci, pos);
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (!pos)
+ break;
+ }
+
+ return 0;
+}
+
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int offset, ptm_cap_base;
+ unsigned int nbars;
+ u8 hdr_type;
+ u32 reg;
+ int i;
+
+ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
+ if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+ dev_err(pci->dev,
+ "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+ hdr_type);
+ return -EIO;
+ }
+
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ }
+
+ /*
+ * PTM responder capability can be disabled only after disabling
+ * PTM root capability.
+ */
+ if (ptm_cap_base) {
+ dw_pcie_dbi_ro_wr_en(pci);
+ reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
+ reg &= ~PCI_PTM_CAP_ROOT;
+ dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
+
+ reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
+ reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
+ dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
+ dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ void *addr;
+ u8 func_no;
+ struct resource *res;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *np = dev->of_node;
+ const struct pci_epc_features *epc_features;
+ struct dw_pcie_ep_func *ep_func;
+
+ INIT_LIST_HEAD(&ep->func_list);
+
+ ret = dw_pcie_get_resources(pci);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ return -ENOMEM;
+
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ return -ENOMEM;
+
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ ep->outbound_addr = addr;
+
+ epc = devm_pci_epc_create(dev, &epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "Failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ return -ENOMEM;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+
+ ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+ ep->page_size);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize address space\n");
+ return ret;
+ }
+
+ ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
+ epc->mem->window.page_size);
+ if (!ep->msi_mem) {
+ ret = -ENOMEM;
+ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
+ goto err_exit_epc_mem;
+ }
+
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ goto err_free_epc_mem;
+
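+	/*
+	 * If the platform reports controller core initialization through a
+	 * notifier, defer completing the DBI/BAR setup until that
+	 * notification instead of doing it here.
+	 */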
+ if (ep->ops->get_features) {
+ epc_features = ep->ops->get_features(ep);
+ if (epc_features->core_init_notifier)
+ return 0;
+ }
+
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret)
+ goto err_remove_edma;
+
+ return 0;
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+err_free_epc_mem:
+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ epc->mem->window.page_size);
+
+err_exit_epc_mem:
+ pci_epc_mem_exit(epc);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
new file mode 100644
index 0000000000..a7170fd0e8
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+static struct pci_ops dw_pcie_ops;
+static struct pci_ops dw_child_pcie_ops;
+
+static void dw_msi_ack_irq(struct irq_data *d)
+{
+ irq_chip_ack_parent(d);
+}
+
+static void dw_msi_mask_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void dw_msi_unmask_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip dw_pcie_msi_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_ack = dw_msi_ack_irq,
+ .irq_mask = dw_msi_mask_irq,
+ .irq_unmask = dw_msi_unmask_irq,
+};
+
+static struct msi_domain_info dw_pcie_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &dw_pcie_msi_irq_chip,
+};
+
+/* MSI int handler */
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
+{
+ int i, pos;
+ unsigned long val;
+ u32 status, num_ctrls;
+ irqreturn_t ret = IRQ_NONE;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ for (i = 0; i < num_ctrls; i++) {
+ status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE));
+ if (!status)
+ continue;
+
+ ret = IRQ_HANDLED;
+ val = status;
+ pos = 0;
+ while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
+ pos)) != MAX_MSI_IRQS_PER_CTRL) {
+ generic_handle_domain_irq(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
+ pos++;
+ }
+ }
+
+ return ret;
+}
+
+/* Chained MSI interrupt service routine */
+static void dw_chained_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dw_pcie_rp *pp;
+
+ chained_irq_enter(chip, desc);
+
+ pp = irq_desc_get_handler_data(desc);
+ dw_handle_msi_irq(pp);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u64 msi_target;
+
+ msi_target = (u64)pp->msi_data;
+
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
+
+ msg->data = d->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)d->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int dw_pci_msi_set_affinity(struct irq_data *d,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void dw_pci_bottom_mask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ pp->irq_mask[ctrl] |= BIT(bit);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dw_pci_bottom_unmask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ pp->irq_mask[ctrl] &= ~BIT(bit);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dw_pci_bottom_ack(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
+}
+
+static struct irq_chip dw_pci_msi_bottom_irq_chip = {
+ .name = "DWPCI-MSI",
+ .irq_ack = dw_pci_bottom_ack,
+ .irq_compose_msi_msg = dw_pci_setup_msi_msg,
+ .irq_set_affinity = dw_pci_msi_set_affinity,
+ .irq_mask = dw_pci_bottom_mask,
+ .irq_unmask = dw_pci_bottom_unmask,
+};
+
+static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct dw_pcie_rp *pp = domain->host_data;
+ unsigned long flags;
+ u32 i;
+ int bit;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
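+	/* Multi-MSI needs a naturally aligned, power-of-two block of vectors */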
+ bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
+ order_base_2(nr_irqs));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+
+ if (bit < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, bit + i,
+ pp->msi_irq_chip,
+ pp, handle_edge_irq,
+ NULL, NULL);
+
+ return 0;
+}
+
+static void dw_pcie_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct dw_pcie_rp *pp = domain->host_data;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
+ order_base_2(nr_irqs));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
+ .alloc = dw_pcie_irq_domain_alloc,
+ .free = dw_pcie_irq_domain_free,
+};
+
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
+
+ pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
+ &dw_pcie_msi_domain_ops, pp);
+ if (!pp->irq_domain) {
+ dev_err(pci->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
+
+ pp->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &dw_pcie_msi_domain_info,
+ pp->irq_domain);
+ if (!pp->msi_domain) {
+ dev_err(pci->dev, "Failed to create MSI domain\n");
+ irq_domain_remove(pp->irq_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+{
+ u32 ctrl;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ NULL, NULL);
+ }
+
+ irq_domain_remove(pp->msi_domain);
+ irq_domain_remove(pp->irq_domain);
+}
+
+static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u64 msi_target = (u64)pp->msi_data;
+
+ if (!pci_msi_enabled() || !pp->has_msi_ctrl)
+ return;
+
+ /* Program the msi_data */
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
+}
+
+static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 ctrl, max_vectors;
+ int irq;
+
+ /* Parse any "msiX" IRQs described in the devicetree */
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ char msi_name[] = "msiX";
+
+ msi_name[3] = '0' + ctrl;
+ irq = platform_get_irq_byname_optional(pdev, msi_name);
+ if (irq == -ENXIO)
+ break;
+ if (irq < 0)
+ return dev_err_probe(dev, irq,
+ "Failed to parse MSI IRQ '%s'\n",
+ msi_name);
+
+ pp->msi_irq[ctrl] = irq;
+ }
+
+	/* If no "msiX" IRQs, the caller should fall back to the "msi" IRQ */
+ if (ctrl == 0)
+ return -ENXIO;
+
+ max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
+ if (pp->num_vectors > max_vectors) {
+ dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
+ max_vectors);
+ pp->num_vectors = max_vectors;
+ }
+ if (!pp->num_vectors)
+ pp->num_vectors = max_vectors;
+
+ return 0;
+}
+
+static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u64 *msi_vaddr;
+ int ret;
+ u32 ctrl, num_ctrls;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
+ pp->irq_mask[ctrl] = ~0;
+
+ if (!pp->msi_irq[0]) {
+ ret = dw_pcie_parse_split_msi_irq(pp);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ }
+
+ if (!pp->num_vectors)
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ if (!pp->msi_irq[0]) {
+ pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
+ if (pp->msi_irq[0] < 0) {
+ pp->msi_irq[0] = platform_get_irq(pdev, 0);
+ if (pp->msi_irq[0] < 0)
+ return pp->msi_irq[0];
+ }
+ }
+
+ dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);
+
+ pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
+ ret = dw_pcie_allocate_domains(pp);
+ if (ret)
+ return ret;
+
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ dw_chained_msi_isr, pp);
+ }
+
+	/*
+	 * Even though the iMSI-RX Module supports 64-bit addresses, some
+	 * peripheral PCIe devices may lack 64-bit message support. In
+	 * order not to miss MSI TLPs from those devices, the MSI target
+	 * address has to be within the lowest 4GB.
+	 *
+	 * Note that until a better alternative is found, the reservation is
+	 * done by allocating from DMA-coherent memory with an artificially
+	 * limited mask.
+	 */
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to alloc and map MSI data\n");
+ dw_pcie_free_msi(pp);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource_entry *win;
+ struct pci_host_bridge *bridge;
+ struct resource *res;
+ int ret;
+
+ raw_spin_lock_init(&pp->lock);
+
+ ret = dw_pcie_get_resources(pci);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (res) {
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
+
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
+ } else {
+ dev_err(dev, "Missing *config* reg space\n");
+ return -ENODEV;
+ }
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ pp->bridge = bridge;
+
+ /* Get the I/O range from DT */
+ win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ if (win) {
+ pp->io_size = resource_size(win->res);
+ pp->io_bus_addr = win->res->start - win->offset;
+ pp->io_base = pci_pio_to_address(win->res->start);
+ }
+
+ /* Set default bus ops */
+ bridge->ops = &dw_pcie_ops;
+ bridge->child_ops = &dw_child_pcie_ops;
+
+ if (pp->ops->host_init) {
+ ret = pp->ops->host_init(pp);
+ if (ret)
+ return ret;
+ }
+
+ if (pci_msi_enabled()) {
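+		/*
+		 * Use the internal MSI controller only when the platform
+		 * neither implements its own msi_host_init() nor routes MSIs
+		 * to an external controller via msi-parent/msi-map.
+		 */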
+ pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
+ of_property_read_bool(np, "msi-parent") ||
+ of_property_read_bool(np, "msi-map"));
+
+ /*
+ * For the has_msi_ctrl case the default assignment is handled
+ * in the dw_pcie_msi_host_init().
+ */
+ if (!pp->has_msi_ctrl && !pp->num_vectors) {
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+ } else if (pp->num_vectors > MAX_MSI_IRQS) {
+ dev_err(dev, "Invalid number of vectors\n");
+ ret = -EINVAL;
+ goto err_deinit_host;
+ }
+
+ if (pp->ops->msi_host_init) {
+ ret = pp->ops->msi_host_init(pp);
+ if (ret < 0)
+ goto err_deinit_host;
+ } else if (pp->has_msi_ctrl) {
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret < 0)
+ goto err_deinit_host;
+ }
+ }
+
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ goto err_free_msi;
+
+ ret = dw_pcie_setup_rc(pp);
+ if (ret)
+ goto err_remove_edma;
+
+ if (!dw_pcie_link_up(pci)) {
+ ret = dw_pcie_start_link(pci);
+ if (ret)
+ goto err_remove_edma;
+ }
+
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
+
+ bridge->sysdata = pp;
+
+ ret = pci_host_probe(bridge);
+ if (ret)
+ goto err_stop_link;
+
+ return 0;
+
+err_stop_link:
+ dw_pcie_stop_link(pci);
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+err_free_msi:
+ if (pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+
+err_deinit_host:
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_init);
+
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ pci_stop_root_bus(pp->bridge->bus);
+ pci_remove_root_bus(pp->bridge->bus);
+
+ dw_pcie_stop_link(pci);
+
+ dw_pcie_edma_remove(pci);
+
+ if (pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
+
+static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int type, ret;
+ u32 busdev;
+
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (pci_is_root_bus(bus->parent))
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ type = PCIE_ATU_TYPE_CFG1;
+
+ ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
+ pp->cfg0_size);
+ if (ret)
+ return NULL;
+
+ return pp->va_cfg0_base + where;
+}
+
+static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
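+	/*
+	 * When the CFG and IO spaces share the only outbound window, the
+	 * config access above reprogrammed that window for CFG, so switch
+	 * it back to IO before returning.
+	 */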
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ ret = pci_generic_config_write(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops dw_child_pcie_ops = {
+ .map_bus = dw_pcie_other_conf_map_bus,
+ .read = dw_pcie_rd_other_conf,
+ .write = dw_pcie_wr_other_conf,
+};
+
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+
+ return pci->dbi_base + where;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
+
+static struct pci_ops dw_pcie_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct resource_entry *entry;
+ int i, ret;
+
+	/* Note that the very first outbound ATU window is used for CFG accesses */
+ if (!pci->num_ob_windows) {
+ dev_err(pci->dev, "No outbound iATU found\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure all out/inbound windows are disabled before proceeding with
+ * the MEM/IO (dma-)ranges setups.
+ */
+ for (i = 0; i < pci->num_ob_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
+
+ for (i = 0; i < pci->num_ib_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);
+
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ob_windows <= ++i)
+ break;
+
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set MEM range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pp->io_size) {
+ if (pci->num_ob_windows > ++i) {
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
+ pp->io_base,
+ pp->io_bus_addr,
+ pp->io_size);
+ if (ret) {
+ dev_err(pci->dev, "Failed to set IO range %pr\n",
+ entry->res);
+ return ret;
+ }
+ } else {
+ pp->cfg0_io_shared = true;
+ }
+ }
+
+ if (pci->num_ob_windows <= i)
+ dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
+ pci->num_ob_windows);
+
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ib_windows <= i)
+ break;
+
+ ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set DMA range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pci->num_ib_windows <= i)
+ dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
+ pci->num_ib_windows);
+
+ return 0;
+}
+
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u32 val, ctrl, num_ctrls;
+ int ret;
+
+ /*
+ * Enable DBI read-only registers for writing/updating configuration.
+ * Write permission gets disabled towards the end of this function.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_setup(pci);
+
+ if (pp->has_msi_ctrl) {
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ ~0);
+ }
+ }
+
+ dw_pcie_msi_init(pp);
+
+ /* Setup RC BARs */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
+
+ /* Setup interrupt pins */
+ val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
+ val &= 0xffff00ff;
+ val |= 0x00000100;
+ dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
+
+ /* Setup bus numbers */
+ val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
+ val &= 0xff000000;
+ val |= 0x00ff0100;
+ dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
+
+ /* Setup command register */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ val &= 0xffff0000;
+ val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
+ dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+ /*
+ * If the platform provides its own child bus config accesses, it means
+ * the platform uses its own address translation component rather than
+ * ATU, so we should not program the ATU here.
+ */
+ if (pp->bridge->child_ops == &dw_child_pcie_ops) {
+ ret = dw_pcie_iatu_setup(pp);
+ if (ret)
+ return ret;
+ }
+
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+
+ /* Program correct class for RC */
+ dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
+
+int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int ret;
+
+ /*
+ * If L1SS is supported, then do not put the link into L2 as some
+ * devices such as NVMe expect low resume latency.
+ */
+ if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
+ return 0;
+
+ if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
+ return 0;
+
+ if (!pci->pp.ops->pme_turn_off)
+ return 0;
+
+ pci->pp.ops->pme_turn_off(&pci->pp);
+
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
+ PCIE_PME_TO_L2_TIMEOUT_US/10,
+ PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
+ if (ret) {
+ dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
+ return ret;
+ }
+
+ if (pci->pp.ops->host_deinit)
+ pci->pp.ops->host_deinit(&pci->pp);
+
+ pci->suspended = true;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);
+
+int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ int ret;
+
+ if (!pci->suspended)
+ return 0;
+
+ pci->suspended = false;
+
+ if (pci->pp.ops->host_init) {
+ ret = pci->pp.ops->host_init(&pci->pp);
+ if (ret) {
+ dev_err(pci->dev, "Host init failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ dw_pcie_setup_rc(&pci->pp);
+
+ ret = dw_pcie_start_link(pci);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
new file mode 100644
index 0000000000..b625841e98
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe RC driver for Synopsys DesignWare Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+struct dw_plat_pcie {
+ struct dw_pcie *pci;
+ enum dw_pcie_device_mode mode;
+};
+
+struct dw_plat_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
+};
+
+static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features dw_plat_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+};
+
+static const struct pci_epc_features*
+dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &dw_plat_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = dw_plat_pcie_ep_init,
+ .raise_irq = dw_plat_pcie_ep_raise_irq,
+ .get_features = dw_plat_pcie_get_features,
+};
+
+static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = dw_plat_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ pp->num_vectors = MAX_MSI_IRQS;
+ pp->ops = &dw_plat_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_plat_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_plat_pcie *dw_plat_pcie;
+ struct dw_pcie *pci;
+ int ret;
+ const struct dw_plat_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
+ if (!dw_plat_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+
+ dw_plat_pcie->pci = pci;
+ dw_plat_pcie->mode = mode;
+
+ platform_set_drvdata(pdev, dw_plat_pcie);
+
+ switch (dw_plat_pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
+ return -ENODEV;
+
+ ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
+ return -ENODEV;
+
+ pci->ep.ops = &pcie_ep_ops;
+ ret = dw_pcie_ep_init(&pci->ep);
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id dw_plat_pcie_of_match[] = {
+ {
+ .compatible = "snps,dw-pcie",
+ .data = &dw_plat_pcie_rc_of_data,
+ },
+ {
+ .compatible = "snps,dw-pcie-ep",
+ .data = &dw_plat_pcie_ep_of_data,
+ },
+ {},
+};
+
+static struct platform_driver dw_plat_pcie_driver = {
+ .driver = {
+ .name = "dw-pcie",
+ .of_match_table = dw_plat_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = dw_plat_pcie_probe,
+};
+builtin_platform_driver(dw_plat_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
new file mode 100644
index 0000000000..2b60d20dfd
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -0,0 +1,1064 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/align.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma/edma.h>
+#include <linux/gpio/consumer.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = {
+ [DW_PCIE_DBI_CLK] = "dbi",
+ [DW_PCIE_MSTR_CLK] = "mstr",
+ [DW_PCIE_SLV_CLK] = "slv",
+};
+
+static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = {
+ [DW_PCIE_PIPE_CLK] = "pipe",
+ [DW_PCIE_CORE_CLK] = "core",
+ [DW_PCIE_AUX_CLK] = "aux",
+ [DW_PCIE_REF_CLK] = "ref",
+};
+
+static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = {
+ [DW_PCIE_DBI_RST] = "dbi",
+ [DW_PCIE_MSTR_RST] = "mstr",
+ [DW_PCIE_SLV_RST] = "slv",
+};
+
+static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
+ [DW_PCIE_NON_STICKY_RST] = "non-sticky",
+ [DW_PCIE_STICKY_RST] = "sticky",
+ [DW_PCIE_CORE_RST] = "core",
+ [DW_PCIE_PIPE_RST] = "pipe",
+ [DW_PCIE_PHY_RST] = "phy",
+ [DW_PCIE_HOT_RST] = "hot",
+ [DW_PCIE_PWR_RST] = "pwr",
+};
+
+static int dw_pcie_get_clocks(struct dw_pcie *pci)
+{
+ int i, ret;
+
+ for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++)
+ pci->app_clks[i].id = dw_pcie_app_clks[i];
+
+ for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++)
+ pci->core_clks[i].id = dw_pcie_core_clks[i];
+
+ ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS,
+ pci->app_clks);
+ if (ret)
+ return ret;
+
+ return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS,
+ pci->core_clks);
+}
+
+static int dw_pcie_get_resets(struct dw_pcie *pci)
+{
+ int i, ret;
+
+ for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++)
+ pci->app_rsts[i].id = dw_pcie_app_rsts[i];
+
+ for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++)
+ pci->core_rsts[i].id = dw_pcie_core_rsts[i];
+
+ ret = devm_reset_control_bulk_get_optional_shared(pci->dev,
+ DW_PCIE_NUM_APP_RSTS,
+ pci->app_rsts);
+ if (ret)
+ return ret;
+
+ ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev,
+ DW_PCIE_NUM_CORE_RSTS,
+ pci->core_rsts);
+ if (ret)
+ return ret;
+
+ pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pci->pe_rst))
+ return PTR_ERR(pci->pe_rst);
+
+ return 0;
+}
+
+int dw_pcie_get_resources(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ struct device_node *np = dev_of_node(pci->dev);
+ struct resource *res;
+ int ret;
+
+ if (!pci->dbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ }
+
+ /* DBI2 is mainly useful for the endpoint controller */
+ if (!pci->dbi_base2) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ if (res) {
+ pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+ } else {
+ pci->dbi_base2 = pci->dbi_base + SZ_4K;
+ }
+ }
+
+ /* For non-unrolled iATU/eDMA platforms this range will be ignored */
+ if (!pci->atu_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ if (res) {
+ pci->atu_size = resource_size(res);
+ pci->atu_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->atu_base))
+ return PTR_ERR(pci->atu_base);
+ } else {
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+ }
+
+ /* Set a default value suitable for at most 8 in and 8 out windows */
+ if (!pci->atu_size)
+ pci->atu_size = SZ_4K;
+
+ /* eDMA region can be mapped to a custom base address */
+ if (!pci->edma.reg_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
+ if (res) {
+ pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->edma.reg_base))
+ return PTR_ERR(pci->edma.reg_base);
+ } else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
+ pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
+ }
+ }
+
+	/* The LLDD is expected to switch the clock and reset states manually */
+ if (dw_pcie_cap_is(pci, REQ_RES)) {
+ ret = dw_pcie_get_clocks(pci);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_get_resets(pci);
+ if (ret)
+ return ret;
+ }
+
+ if (pci->link_gen < 1)
+ pci->link_gen = of_pci_get_max_link_speed(np);
+
+ of_property_read_u32(np, "num-lanes", &pci->num_lanes);
+
+ if (of_property_read_bool(np, "snps,enable-cdm-check"))
+ dw_pcie_cap_set(pci, CDM_CHECK);
+
+ return 0;
+}
+
+void dw_pcie_version_detect(struct dw_pcie *pci)
+{
+ u32 ver;
+
+ /* The content of the CSR is zero on DWC PCIe older than v4.70a */
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
+ if (!ver)
+ return;
+
+ if (pci->version && pci->version != ver)
+ dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
+ pci->version, ver);
+ else
+ pci->version = ver;
+
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
+
+ if (pci->type && pci->type != ver)
+ dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
+ pci->type, ver);
+ else
+ pci->type = ver;
+}
+
+/*
+ * These interfaces resemble the pci_find_*capability() interfaces, but these
+ * are for configuring host controllers, which are bridges *to* PCI devices but
+ * are not PCI devices themselves.
+ */
+static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
+ u8 cap)
+{
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
+ if (!cap_ptr)
+ return 0;
+
+ reg = dw_pcie_readw_dbi(pci, cap_ptr);
+ cap_id = (reg & 0x00ff);
+
+ if (cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
+ next_cap_ptr = (reg & 0xff00) >> 8;
+ return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+}
+
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
+{
+ u8 next_cap_ptr;
+ u16 reg;
+
+ reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+ return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
+
+static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
+ u8 cap)
+{
+ u32 header;
+ int ttl;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ /* minimum 8 bytes per capability */
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+ if (start)
+ pos = start;
+
+ header = dw_pcie_readl_dbi(pci, pos);
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap && pos != start)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < PCI_CFG_SPACE_SIZE)
+ break;
+
+ header = dw_pcie_readl_dbi(pci, pos);
+ }
+
+ return 0;
+}
+
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
+{
+ return dw_pcie_find_next_ext_capability(pci, 0, cap);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+
+int dw_pcie_read(void __iomem *addr, int size, u32 *val)
+{
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 4) {
+ *val = readl(addr);
+ } else if (size == 2) {
+ *val = readw(addr);
+ } else if (size == 1) {
+ *val = readb(addr);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_read);
+
+int dw_pcie_write(void __iomem *addr, int size, u32 val)
+{
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (size == 4)
+ writel(val, addr);
+ else if (size == 2)
+ writew(val, addr);
+ else if (size == 1)
+ writeb(val, addr);
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_write);
+
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
+{
+ int ret;
+ u32 val;
+
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
+
+ ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
+ if (ret)
+ dev_err(pci->dev, "Read DBI address failed\n");
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
+
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+ int ret;
+
+ if (pci->ops && pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(pci->dbi_base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI address failed\n");
+}
+EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
+
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+ int ret;
+
+ if (pci->ops && pci->ops->write_dbi2) {
+ pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
+ if (ret)
+		dev_err(pci->dev, "Write DBI address failed\n");
+}
+
+static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
+ u32 index)
+{
+ if (dw_pcie_cap_is(pci, IATU_UNROLL))
+ return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
+ return pci->atu_base;
+}
+
+static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
+{
+ void __iomem *base;
+ int ret;
+ u32 val;
+
+ base = dw_pcie_select_atu(pci, dir, index);
+
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, base, reg, 4);
+
+ ret = dw_pcie_read(base + reg, 4, &val);
+ if (ret)
+ dev_err(pci->dev, "Read ATU address failed\n");
+
+ return val;
+}
+
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
+ u32 reg, u32 val)
+{
+ void __iomem *base;
+ int ret;
+
+ base = dw_pcie_select_atu(pci, dir, index);
+
+ if (pci->ops && pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, base, reg, 4, val);
+ return;
+ }
+
+ ret = dw_pcie_write(base + reg, 4, val);
+ if (ret)
+ dev_err(pci->dev, "Write ATU address failed\n");
+}
+
+static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
+{
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
+}
+
+static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
+{
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
+}
+
+static inline u32 dw_pcie_enable_ecrc(u32 val)
+{
+ /*
+ * DesignWare core version 4.90A has a design issue where the 'TD'
+ * bit in the Control register-1 of the ATU outbound region acts
+ * like an override for the ECRC setting, i.e., the presence of TLP
+ * Digest (ECRC) in the outgoing TLPs is solely determined by this
+ * bit. This is contrary to the PCIe spec which says that the
+ * enablement of the ECRC is solely determined by the AER
+ * registers.
+ *
+ * Because of this, even when the ECRC is enabled through AER
+ * registers, the transactions going through ATU won't have TLP
+ * Digest as there is no way the PCI core AER code could program
+ * the TD bit which is specific to the DesignWare core.
+ *
+ * The best way to handle this scenario is to program the TD bit
+ * always. It affects only the traffic from root port to downstream
+ * devices.
+ *
+ * At this point,
+ * When ECRC is enabled in AER registers, everything works normally
+ * When ECRC is NOT enabled in AER registers, then,
+ * on Root Port:- TLP Digest (DWord size) gets appended to each packet
+	 * even though it is not required. Since downstream
+ * TLPs are mostly for configuration accesses and BAR
+ * accesses, they are not in critical path and won't
+ * have much negative effect on the performance.
+ * on End Point:- TLP Digest is received for some/all the packets coming
+ * from the root port. TLP Digest is ignored because,
+ * as per the PCIe Spec r5.0 v1.0 section 2.2.3
+ * "TLP Digest Rules", when an endpoint receives TLP
+ * Digest when its ECRC check functionality is disabled
+ * in AER registers, received TLP Digest is just ignored.
+ * Since there is no issue or error reported either side, best way to
+ * handle the scenario is to program TD bit by default.
+ */
+
+ return val | PCIE_ATU_TD;
+}
+
+static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ u32 retries, val;
+ u64 limit_addr;
+
+ if (pci->ops && pci->ops->cpu_addr_fixup)
+ cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+
+ limit_addr = cpu_addr + size - 1;
+
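+	/*
+	 * Reject windows that exceed the controller's maximum region size or
+	 * that are not aligned to the detected iATU granularity.
+	 */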
+ if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ return -EINVAL;
+ }
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(cpu_addr));
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(pci_addr));
+
+ val = type | PCIE_ATU_FUNC_NUM(func_no);
+ if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ if (dw_pcie_ver_is(pci, 490A))
+ val = dw_pcie_enable_ecrc(val);
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ mdelay(LINK_WAIT_IATU);
+ }
+
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
+
+ return -ETIMEDOUT;
+}
+
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
+{
+ return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
+ cpu_addr, pci_addr, size);
+}
+
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u64 size)
+{
+ return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
+}
+
+static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
+{
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
+}
+
+static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
+{
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
+}
+
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
+{
+ u64 limit_addr = pci_addr + size - 1;
+ u32 retries, val;
+
+ if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ return -EINVAL;
+ }
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(pci_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ val = type;
+ if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val);
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ mdelay(LINK_WAIT_IATU);
+ }
+
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+ return -ETIMEDOUT;
+}
+
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u8 bar)
+{
+ u32 retries, val;
+
+ if (!IS_ALIGNED(cpu_addr, pci->region_align))
+ return -EINVAL;
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
+ PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ mdelay(LINK_WAIT_IATU);
+ }
+
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+ return -ETIMEDOUT;
+}
+
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
+{
+ dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
+}
+
+int dw_pcie_wait_for_link(struct dw_pcie *pci)
+{
+ u32 offset, val;
+ int retries;
+
+ /* Check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (dw_pcie_link_up(pci))
+ break;
+
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ if (retries >= LINK_WAIT_MAX_RETRIES) {
+ dev_info(pci->dev, "Phy link never came up\n");
+ return -ETIMEDOUT;
+ }
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+ dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
+ FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
+
+int dw_pcie_link_up(struct dw_pcie *pci)
+{
+ u32 val;
+
+ if (pci->ops && pci->ops->link_up)
+ return pci->ops->link_up(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
+ return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
+ (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
+}
+EXPORT_SYMBOL_GPL(dw_pcie_link_up);
+
+void dw_pcie_upconfig_setup(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
+ val |= PORT_MLTI_UPCFG_SUPPORT;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
+
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+{
+ u32 cap, ctrl2, link_speed;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
+
+ switch (pcie_link_speed[link_gen]) {
+ case PCIE_SPEED_2_5GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
+ break;
+ case PCIE_SPEED_5_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
+ break;
+ case PCIE_SPEED_8_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
+ break;
+ case PCIE_SPEED_16_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
+ break;
+ default:
+ /* Use hardware capability */
+ link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
+ break;
+ }
+
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);
+
+ cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
+}
+
+static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
+{
+ u32 lnkcap, lwsc, plc;
+ u8 cap;
+
+ if (!num_lanes)
+ return;
+
+ /* Set the number of lanes */
+ plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ plc &= ~PORT_LINK_FAST_LINK_MODE;
+ plc &= ~PORT_LINK_MODE_MASK;
+
+ /* Set link width speed control register */
+ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ switch (num_lanes) {
+ case 1:
+ plc |= PORT_LINK_MODE_1_LANES;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ break;
+ case 2:
+ plc |= PORT_LINK_MODE_2_LANES;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+ break;
+ case 4:
+ plc |= PORT_LINK_MODE_4_LANES;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+ break;
+ case 8:
+ plc |= PORT_LINK_MODE_8_LANES;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+ break;
+ default:
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
+ return;
+ }
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);
+
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+}
+
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
+{
+ int max_region, ob, ib;
+ u32 val, min, dir;
+ u64 max;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
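+	/*
+	 * The viewport register reads back as all-ones when the iATU uses the
+	 * unrolled layout (a dedicated CSR space per region); otherwise the
+	 * legacy viewport-indexed layout is in use.
+	 */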
+ if (val == 0xFFFFFFFF) {
+ dw_pcie_cap_set(pci, IATU_UNROLL);
+
+ max_region = min((int)pci->atu_size / 512, 256);
+ } else {
+ pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+ pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+ max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+ }
+
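+	/*
+	 * Probe how many regions are usable in each direction by writing a
+	 * test pattern to each region's target register until a write no
+	 * longer sticks.
+	 */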
+ for (ob = 0; ob < max_region; ob++) {
+ dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
+ break;
+ }
+
+ for (ib = 0; ib < max_region; ib++) {
+ dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
+ break;
+ }
+
+ if (ob) {
+ dir = PCIE_ATU_REGION_DIR_OB;
+ } else if (ib) {
+ dir = PCIE_ATU_REGION_DIR_IB;
+ } else {
+ dev_err(pci->dev, "No iATU regions found\n");
+ return;
+ }
+
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
+ min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);
+
+ if (dw_pcie_ver_is_ge(pci, 460A)) {
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
+ max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
+ } else {
+ max = 0;
+ }
+
+ pci->num_ob_windows = ob;
+ pci->num_ib_windows = ib;
+ pci->region_align = 1 << fls(min);
+ pci->region_limit = (max << 32) | (SZ_4G - 1);
+
+ dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n",
+ dw_pcie_cap_is(pci, IATU_UNROLL) ? "T" : "F",
+ pci->num_ob_windows, pci->num_ib_windows,
+ pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
+}
+
+static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg)
+{
+ u32 val = 0;
+ int ret;
+
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4);
+
+ ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val);
+ if (ret)
+ dev_err(pci->dev, "Read DMA address failed\n");
+
+ return val;
+}
+
+static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ char name[6];
+ int ret;
+
+ if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0)
+ return ret;
+
+ snprintf(name, sizeof(name), "dma%u", nr);
+
+ return platform_get_irq_byname_optional(pdev, name);
+}
+
+static struct dw_edma_plat_ops dw_pcie_edma_ops = {
+ .irq_vector = dw_pcie_edma_irq_vector,
+};
+
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+ u32 val;
+
+	/*
+	 * Indirect eDMA CSR access has been completely removed since v5.40a,
+	 * so no space is reserved for the eDMA channels viewport anymore and
+	 * the former DMA CTRL register is no longer fixed to all-ones.
+	 */
+ if (dw_pcie_ver_is_ge(pci, 540A))
+ val = 0xFFFFFFFF;
+ else
+ val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
+
+ if (val == 0xFFFFFFFF && pci->edma.reg_base) {
+ pci->edma.mf = EDMA_MF_EDMA_UNROLL;
+
+ val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
+ } else if (val != 0xFFFFFFFF) {
+ pci->edma.mf = EDMA_MF_EDMA_LEGACY;
+
+ pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE;
+ } else {
+ return -ENODEV;
+ }
+
+ pci->edma.dev = pci->dev;
+
+ if (!pci->edma.ops)
+ pci->edma.ops = &dw_pcie_edma_ops;
+
+ pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+
+ pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+
+	/* Sanity check the channel counts in case the mapping was incorrect */
+ if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
+ !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
+ char name[6];
+ int ret;
+
+ if (pci->edma.nr_irqs == 1)
+ return 0;
+ else if (pci->edma.nr_irqs > 1)
+ return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0) {
+ pci->edma.nr_irqs = 1;
+ return 0;
+ }
+
+ for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) {
+ snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs);
+
+ ret = platform_get_irq_byname_optional(pdev, name);
+ if (ret <= 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci)
+{
+ struct dw_edma_region *ll;
+ dma_addr_t paddr;
+ int i;
+
+ for (i = 0; i < pci->edma.ll_wr_cnt; i++) {
+ ll = &pci->edma.ll_region_wr[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ for (i = 0; i < pci->edma.ll_rd_cnt; i++) {
+ ll = &pci->edma.ll_region_rd[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ return 0;
+}
+
+int dw_pcie_edma_detect(struct dw_pcie *pci)
+{
+ int ret;
+
+	/* Don't fail if no eDMA was found (for backward compatibility) */
+ ret = dw_pcie_edma_find_chip(pci);
+ if (ret)
+ return 0;
+
+	/* Don't fail on the IRQs verification (for backward compatibility) */
+ ret = dw_pcie_edma_irq_verify(pci);
+ if (ret) {
+ dev_err(pci->dev, "Invalid eDMA IRQs found\n");
+ return 0;
+ }
+
+ ret = dw_pcie_edma_ll_alloc(pci);
+ if (ret) {
+ dev_err(pci->dev, "Couldn't allocate LLP memory\n");
+ return ret;
+ }
+
+ /* Don't fail if the DW eDMA driver can't find the device */
+ ret = dw_edma_probe(&pci->edma);
+ if (ret && ret != -ENODEV) {
+ dev_err(pci->dev, "Couldn't register eDMA device\n");
+ return ret;
+ }
+
+ dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n",
+ pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F",
+ pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt);
+
+ return 0;
+}
+
+void dw_pcie_edma_remove(struct dw_pcie *pci)
+{
+ dw_edma_remove(&pci->edma);
+}
+
+void dw_pcie_setup(struct dw_pcie *pci)
+{
+ u32 val;
+
+ if (pci->link_gen > 0)
+ dw_pcie_link_set_max_speed(pci, pci->link_gen);
+
+ /* Configure Gen1 N_FTS */
+ if (pci->n_fts[0]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
+ val |= PORT_AFR_N_FTS(pci->n_fts[0]);
+ val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
+ }
+
+ /* Configure Gen2+ N_FTS */
+ if (pci->n_fts[1]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_N_FTS_MASK;
+ val |= pci->n_fts[1];
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ }
+
+ if (dw_pcie_cap_is(pci, CDM_CHECK)) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+ PCIE_PL_CHK_REG_CHK_REG_START;
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+ dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
+}
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
new file mode 100644
index 0000000000..ef0b2efa9f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -0,0 +1,645 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#ifndef _PCIE_DESIGNWARE_H
+#define _PCIE_DESIGNWARE_H
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/edma.h>
+#include <linux/gpio/consumer.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/reset.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+/* DWC PCIe IP-core versions (native support since v4.70a) */
+#define DW_PCIE_VER_365A 0x3336352a
+#define DW_PCIE_VER_460A 0x3436302a
+#define DW_PCIE_VER_470A 0x3437302a
+#define DW_PCIE_VER_480A 0x3438302a
+#define DW_PCIE_VER_490A 0x3439302a
+#define DW_PCIE_VER_520A 0x3532302a
+#define DW_PCIE_VER_540A 0x3534302a
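+/*
+ * For illustration: these constants match the raw PCIE_VERSION_NUMBER value,
+ * e.g. DW_PCIE_VER_460A == 0x3436302a, whose bytes (MSB first) are the ASCII
+ * characters '4', '6', '0', '*'. The meaning of the trailing '*' byte is an
+ * assumption here (a placeholder for the letter suffix).
+ */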
+
+#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
+ ((_pci)->version _op DW_PCIE_VER_ ## _ver)
+
+#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==)
+
+#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=)
+
+#define dw_pcie_ver_type_is(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==))
+
+#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=))
+
+/* DWC PCIe controller capabilities */
+#define DW_PCIE_CAP_REQ_RES 0
+#define DW_PCIE_CAP_IATU_UNROLL 1
+#define DW_PCIE_CAP_CDM_CHECK 2
+
+#define dw_pcie_cap_is(_pci, _cap) \
+ test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
+
+#define dw_pcie_cap_set(_pci, _cap) \
+ set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
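+/* i.e. up to roughly 10 * 100 ms = ~1 s of polling for the link to come up */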
+
+/* Parameters for the waiting for iATU enabled routine */
+#define LINK_WAIT_MAX_IATU_RETRIES 5
+#define LINK_WAIT_IATU 9
+
+/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_AFR 0x70C
+#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
+#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
+#define PORT_AFR_CC_N_FTS_MASK GENMASK(23, 16)
+#define PORT_AFR_CC_N_FTS(n) FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, n)
+#define PORT_AFR_ENTER_ASPM BIT(30)
+#define PORT_AFR_L0S_ENTRANCE_LAT_SHIFT 24
+#define PORT_AFR_L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
+#define PORT_AFR_L1_ENTRANCE_LAT_SHIFT 27
+#define PORT_AFR_L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
+
+#define PCIE_PORT_LINK_CONTROL 0x710
+#define PORT_LINK_DLL_LINK_EN BIT(5)
+#define PORT_LINK_FAST_LINK_MODE BIT(7)
+#define PORT_LINK_MODE_MASK GENMASK(21, 16)
+#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n)
+#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
+#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
+#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
+#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
+
+#define PCIE_PORT_DEBUG0 0x728
+#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
+#define PORT_LOGIC_LTSSM_STATE_L0 0x11
+#define PCIE_PORT_DEBUG1 0x72C
+#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
+#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29)
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+#define PORT_LOGIC_N_FTS_MASK GENMASK(7, 0)
+#define PORT_LOGIC_SPEED_CHANGE BIT(17)
+#define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8)
+#define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES PORT_LOGIC_LINK_WIDTH(0x1)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4)
+#define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8)
+
+#define PCIE_MSI_ADDR_LO 0x820
+#define PCIE_MSI_ADDR_HI 0x824
+#define PCIE_MSI_INTR0_ENABLE 0x828
+#define PCIE_MSI_INTR0_MASK 0x82C
+#define PCIE_MSI_INTR0_STATUS 0x830
+
+#define GEN3_RELATED_OFF 0x890
+#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
+#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13)
+#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+
+#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
+#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
+
+#define PCIE_VERSION_NUMBER 0x8F8
+#define PCIE_VERSION_TYPE 0x8FC
+
+/*
+ * iATU inbound and outbound windows CSRs. Before the IP-core v4.80a each
+ * iATU region CSRs had been indirectly accessible by means of the dedicated
+ * viewport selector. The iATU/eDMA CSRs space was re-designed in DWC PCIe
+ * v4.80a in a way so the viewport was unrolled into the directly accessible
+ * iATU/eDMA CSRs space.
+ */
+#define PCIE_ATU_VIEWPORT 0x900
+#define PCIE_ATU_REGION_DIR_IB BIT(31)
+#define PCIE_ATU_REGION_DIR_OB 0
+#define PCIE_ATU_VIEWPORT_BASE 0x904
+#define PCIE_ATU_UNROLL_BASE(dir, index) \
+ (((index) << 9) | ((dir == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))
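+/*
+ * Worked example: in the unrolled mapping each region occupies a 0x200-byte
+ * slot with inbound regions at a 0x100 offset within it, e.g.
+ * PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, 1) == 0x200 and
+ * PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_IB, 1) == 0x300.
+ */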
+#define PCIE_ATU_VIEWPORT_SIZE 0x2C
+#define PCIE_ATU_REGION_CTRL1 0x000
+#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
+#define PCIE_ATU_TYPE_MEM 0x0
+#define PCIE_ATU_TYPE_IO 0x2
+#define PCIE_ATU_TYPE_CFG0 0x4
+#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_TD BIT(8)
+#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
+#define PCIE_ATU_REGION_CTRL2 0x004
+#define PCIE_ATU_ENABLE BIT(31)
+#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
+#define PCIE_ATU_LOWER_BASE 0x008
+#define PCIE_ATU_UPPER_BASE 0x00C
+#define PCIE_ATU_LIMIT 0x010
+#define PCIE_ATU_LOWER_TARGET 0x014
+#define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x)
+#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
+#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
+#define PCIE_ATU_UPPER_TARGET 0x018
+#define PCIE_ATU_UPPER_LIMIT 0x020
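+/*
+ * Illustration (an inference from the macros above): a CFG0 access targeting
+ * bus 1, device 0, function 0 would program a lower target of
+ * PCIE_ATU_BUS(1) | PCIE_ATU_DEV(0) | PCIE_ATU_FUNC(0) == 0x01000000.
+ */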
+
+#define PCIE_MISC_CONTROL_1_OFF 0x8BC
+#define PCIE_DBI_RO_WR_EN BIT(0)
+
+#define PCIE_MSIX_DOORBELL 0x948
+#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
+
+/*
+ * eDMA CSRs. DW PCIe IP-core v4.70a and older had the eDMA registers accessible
+ * over the Port Logic registers space. Afterwards the unrolled mapping was
+ * introduced so eDMA and iATU could be accessed via a dedicated registers
+ * space.
+ */
+#define PCIE_DMA_VIEWPORT_BASE 0x970
+#define PCIE_DMA_UNROLL_BASE 0x80000
+#define PCIE_DMA_CTRL 0x008
+#define PCIE_DMA_NUM_WR_CHAN GENMASK(3, 0)
+#define PCIE_DMA_NUM_RD_CHAN GENMASK(19, 16)
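+/*
+ * Example decode: a DMA CTRL value of 0x00020004 would yield
+ * FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val) == 4 write channels and
+ * FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val) == 2 read channels.
+ */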
+
+#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
+#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
+#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR BIT(16)
+#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR BIT(17)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE BIT(18)
+
+#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
+
+/*
+ * iATU Unroll-specific register definitions
+ * From core version 4.80a onwards, address translation is performed via the
+ * unrolled (directly mapped) iATU register space.
+ */
+#define PCIE_ATU_UNR_REGION_CTRL1 0x00
+#define PCIE_ATU_UNR_REGION_CTRL2 0x04
+#define PCIE_ATU_UNR_LOWER_BASE 0x08
+#define PCIE_ATU_UNR_UPPER_BASE 0x0C
+#define PCIE_ATU_UNR_LOWER_LIMIT 0x10
+#define PCIE_ATU_UNR_LOWER_TARGET 0x14
+#define PCIE_ATU_UNR_UPPER_TARGET 0x18
+#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
+
+/*
+ * RAS-DES register definitions
+ */
+#define PCIE_RAS_DES_EVENT_COUNTER_CONTROL 0x8
+#define EVENT_COUNTER_ALL_CLEAR 0x3
+#define EVENT_COUNTER_ENABLE_ALL 0x7
+#define EVENT_COUNTER_ENABLE_SHIFT 2
+#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
+#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
+#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
+#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
+#define EVENT_COUNTER_EVENT_L1 0x5
+#define EVENT_COUNTER_EVENT_L1_1 0x7
+#define EVENT_COUNTER_EVENT_L1_2 0x8
+#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
+#define EVENT_COUNTER_GROUP_5 0x5
+
+#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
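+/*
+ * Sketch (an assumption based on the shift/field definitions above, not a
+ * sequence taken from this file): selecting the group-5 L1.2 entry event and
+ * enabling all counters would be written roughly as
+ *   (EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT) |
+ *   (EVENT_COUNTER_EVENT_L1_2 << EVENT_COUNTER_EVENT_SEL_SHIFT) |
+ *   (EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT)
+ * into PCIE_RAS_DES_EVENT_COUNTER_CONTROL inside the RAS-DES capability.
+ */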
+
+/*
+ * The default address offset between dbi_base and atu_base. Root controller
+ * drivers are not required to initialize atu_base if the offset matches this
+ * default; when atu_base is left unset, the driver core derives it from
+ * dbi_base using this offset.
+ */
+#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
+#define DEFAULT_DBI_DMA_OFFSET PCIE_DMA_UNROLL_BASE
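+/* 0x3 << 20 == 0x300000, i.e. a default 3 MiB offset from dbi_base to atu_base */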
+
+#define MAX_MSI_IRQS 256
+#define MAX_MSI_IRQS_PER_CTRL 32
+#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
+#define MSI_REG_CTRL_BLOCK_SIZE 12
+#define MSI_DEF_NUM_VECTORS 32
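+/*
+ * For illustration: 256 vectors / 32 per controller gives MAX_MSI_CTRLS == 8,
+ * each controller presumably owning a 12-byte block of ENABLE/MASK/STATUS
+ * registers (cf. PCIE_MSI_INTR0_ENABLE/_MASK/_STATUS above).
+ */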
+
+/* Maximum number of inbound/outbound iATUs */
+#define MAX_IATU_IN 256
+#define MAX_IATU_OUT 256
+
+/* Default eDMA LLP memory size */
+#define DMA_LLP_MEM_SIZE PAGE_SIZE
+
+struct dw_pcie;
+struct dw_pcie_rp;
+struct dw_pcie_ep;
+
+enum dw_pcie_device_mode {
+ DW_PCIE_UNKNOWN_TYPE,
+ DW_PCIE_EP_TYPE,
+ DW_PCIE_LEG_EP_TYPE,
+ DW_PCIE_RC_TYPE,
+};
+
+enum dw_pcie_app_clk {
+ DW_PCIE_DBI_CLK,
+ DW_PCIE_MSTR_CLK,
+ DW_PCIE_SLV_CLK,
+ DW_PCIE_NUM_APP_CLKS
+};
+
+enum dw_pcie_core_clk {
+ DW_PCIE_PIPE_CLK,
+ DW_PCIE_CORE_CLK,
+ DW_PCIE_AUX_CLK,
+ DW_PCIE_REF_CLK,
+ DW_PCIE_NUM_CORE_CLKS
+};
+
+enum dw_pcie_app_rst {
+ DW_PCIE_DBI_RST,
+ DW_PCIE_MSTR_RST,
+ DW_PCIE_SLV_RST,
+ DW_PCIE_NUM_APP_RSTS
+};
+
+enum dw_pcie_core_rst {
+ DW_PCIE_NON_STICKY_RST,
+ DW_PCIE_STICKY_RST,
+ DW_PCIE_CORE_RST,
+ DW_PCIE_PIPE_RST,
+ DW_PCIE_PHY_RST,
+ DW_PCIE_HOT_RST,
+ DW_PCIE_PWR_RST,
+ DW_PCIE_NUM_CORE_RSTS
+};
+
+enum dw_pcie_ltssm {
+ /* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
+ DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
+ DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_L0 = 0x11,
+ DW_PCIE_LTSSM_L2_IDLE = 0x15,
+
+ DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
+};
+
+struct dw_pcie_host_ops {
+ int (*host_init)(struct dw_pcie_rp *pp);
+ void (*host_deinit)(struct dw_pcie_rp *pp);
+ int (*msi_host_init)(struct dw_pcie_rp *pp);
+ void (*pme_turn_off)(struct dw_pcie_rp *pp);
+};
+
+struct dw_pcie_rp {
+ bool has_msi_ctrl:1;
+ bool cfg0_io_shared:1;
+ u64 cfg0_base;
+ void __iomem *va_cfg0_base;
+ u32 cfg0_size;
+ resource_size_t io_base;
+ phys_addr_t io_bus_addr;
+ u32 io_size;
+ int irq;
+ const struct dw_pcie_host_ops *ops;
+ int msi_irq[MAX_MSI_CTRLS];
+ struct irq_domain *irq_domain;
+ struct irq_domain *msi_domain;
+ dma_addr_t msi_data;
+ struct irq_chip *msi_irq_chip;
+ u32 num_vectors;
+ u32 irq_mask[MAX_MSI_CTRLS];
+ struct pci_host_bridge *bridge;
+ raw_spinlock_t lock;
+ DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+};
+
+struct dw_pcie_ep_ops {
+ void (*ep_init)(struct dw_pcie_ep *ep);
+ int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num);
+ const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
+	/*
+	 * Provide a way to implement platform-specific per-function config
+	 * space access. If each function has its own offset, return that
+	 * function's offset. If function selection is instead done by writing
+	 * a register, return 0 and handle the selection in the platform
+	 * driver's callback.
+	 */
+ unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no);
+};
+
+struct dw_pcie_ep_func {
+ struct list_head list;
+ u8 func_no;
+ u8 msi_cap; /* MSI capability offset */
+ u8 msix_cap; /* MSI-X capability offset */
+};
+
+struct dw_pcie_ep {
+ struct pci_epc *epc;
+ struct list_head func_list;
+ const struct dw_pcie_ep_ops *ops;
+ phys_addr_t phys_base;
+ size_t addr_size;
+ size_t page_size;
+ u8 bar_to_atu[PCI_STD_NUM_BARS];
+ phys_addr_t *outbound_addr;
+ unsigned long *ib_window_map;
+ unsigned long *ob_window_map;
+ void __iomem *msi_mem;
+ phys_addr_t msi_mem_phys;
+ struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
+};
+
+struct dw_pcie_ops {
+ u64 (*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+ u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size);
+ void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size, u32 val);
+ void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size, u32 val);
+ int (*link_up)(struct dw_pcie *pcie);
+ enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie);
+ int (*start_link)(struct dw_pcie *pcie);
+ void (*stop_link)(struct dw_pcie *pcie);
+};
+
+struct dw_pcie {
+ struct device *dev;
+ void __iomem *dbi_base;
+ void __iomem *dbi_base2;
+ void __iomem *atu_base;
+ size_t atu_size;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
+ u32 region_align;
+ u64 region_limit;
+ struct dw_pcie_rp pp;
+ struct dw_pcie_ep ep;
+ const struct dw_pcie_ops *ops;
+ u32 version;
+ u32 type;
+ unsigned long caps;
+ int num_lanes;
+ int link_gen;
+ u8 n_fts[2];
+ struct dw_edma_chip edma;
+ struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
+ struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
+ struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
+ struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
+ struct gpio_desc *pe_rst;
+ bool suspended;
+};
+
+#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
+
+#define to_dw_pcie_from_ep(endpoint) \
+ container_of((endpoint), struct dw_pcie, ep)
+
+int dw_pcie_get_resources(struct dw_pcie *pci);
+
+void dw_pcie_version_detect(struct dw_pcie *pci);
+
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+
+int dw_pcie_read(void __iomem *addr, int size, u32 *val);
+int dw_pcie_write(void __iomem *addr, int size, u32 val);
+
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+int dw_pcie_link_up(struct dw_pcie *pci);
+void dw_pcie_upconfig_setup(struct dw_pcie *pci);
+int dw_pcie_wait_for_link(struct dw_pcie *pci);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u8 bar);
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
+void dw_pcie_setup(struct dw_pcie *pci);
+void dw_pcie_iatu_detect(struct dw_pcie *pci);
+int dw_pcie_edma_detect(struct dw_pcie *pci);
+void dw_pcie_edma_remove(struct dw_pcie *pci);
+
+int dw_pcie_suspend_noirq(struct dw_pcie *pci);
+int dw_pcie_resume_noirq(struct dw_pcie *pci);
+
+static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+{
+ dw_pcie_write_dbi(pci, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return dw_pcie_read_dbi(pci, reg, 0x4);
+}
+
+static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
+{
+ dw_pcie_write_dbi(pci, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return dw_pcie_read_dbi(pci, reg, 0x2);
+}
+
+static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
+{
+ dw_pcie_write_dbi(pci, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return dw_pcie_read_dbi(pci, reg, 0x1);
+}
+
+static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
+{
+ dw_pcie_write_dbi2(pci, reg, 0x4, val);
+}
+
+static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
+{
+ u32 reg;
+ u32 val;
+
+ reg = PCIE_MISC_CONTROL_1_OFF;
+ val = dw_pcie_readl_dbi(pci, reg);
+ val |= PCIE_DBI_RO_WR_EN;
+ dw_pcie_writel_dbi(pci, reg, val);
+}
+
+static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
+{
+ u32 reg;
+ u32 val;
+
+ reg = PCIE_MISC_CONTROL_1_OFF;
+ val = dw_pcie_readl_dbi(pci, reg);
+ val &= ~PCIE_DBI_RO_WR_EN;
+ dw_pcie_writel_dbi(pci, reg, val);
+}
+
+static inline int dw_pcie_start_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->start_link)
+ return pci->ops->start_link(pci);
+
+ return 0;
+}
+
+static inline void dw_pcie_stop_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->stop_link)
+ pci->ops->stop_link(pci);
+}
+
+static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
+{
+ u32 val;
+
+ if (pci->ops && pci->ops->get_ltssm)
+ return pci->ops->get_ltssm(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+
+ return (enum dw_pcie_ltssm)FIELD_GET(PORT_LOGIC_LTSSM_STATE_MASK, val);
+}
+
+#ifdef CONFIG_PCIE_DW_HOST
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
+int dw_pcie_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+#else
+static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
+{
+ return IRQ_NONE;
+}
+
+static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+{
+ return 0;
+}
+
+static inline int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ return 0;
+}
+
+static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+}
+
+static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
+{
+ return 0;
+}
+static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_PCIE_DW_EP
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
+int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u8 interrupt_num);
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
+#else
+static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+}
+
+static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+ return 0;
+}
+
+static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u8 interrupt_num)
+{
+ return 0;
+}
+
+static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ return 0;
+}
+
+static inline int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep,
+ u8 func_no,
+ u16 interrupt_num)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+}
+
+static inline struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ return NULL;
+}
+#endif
+#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
new file mode 100644
index 0000000000..2fe42c7009
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Rockchip SoCs.
+ *
+ * Copyright (C) 2021 Rockchip Electronics Co., Ltd.
+ * http://www.rock-chips.com
+ *
+ * Author: Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write
+ * mask for the lower 16 bits.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+#define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val)
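+/*
+ * Worked example: HIWORD_UPDATE_BIT(0x40) expands to 0x00400040, so only
+ * bit 6 is write-enabled (and set); all other bits are left untouched.
+ */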
+
+#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_SMLH_LINKUP BIT(16)
+#define PCIE_RDLH_LINKUP BIT(17)
+#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
+#define PCIE_L0S_ENTRY 0x11
+#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
+#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_CLIENT_LTSSM_STATUS 0x300
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+
+struct rockchip_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct regulator *vpcie3v3;
+ struct irq_domain *irq_domain;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
+ u32 reg)
+{
+ return readl_relaxed(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ unsigned long reg, hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY);
+
+ for_each_set_bit(hwirq, &reg, 4)
+ generic_handle_domain_irq(rockchip->irq_domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void rockchip_intx_mask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ HIWORD_UPDATE_BIT(BIT(data->hwirq)),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static void rockchip_intx_unmask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ HIWORD_DISABLE_BIT(BIT(data->hwirq)),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static struct irq_chip rockchip_intx_irq_chip = {
+ .name = "INTx",
+ .irq_mask = rockchip_intx_mask,
+ .irq_unmask = rockchip_intx_unmask,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rockchip_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ struct device_node *intc;
+
+ intc = of_get_child_by_name(dev->of_node, "legacy-interrupt-controller");
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
+ of_node_put(intc);
+ if (!rockchip->irq_domain) {
+		dev_err(dev, "failed to get an INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CONTROL);
+}
+
+static int rockchip_pcie_link_up(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
+ (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
+ return 1;
+
+ return 0;
+}
+
+static int rockchip_pcie_start_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ /* Reset device */
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 0);
+
+ rockchip_pcie_enable_ltssm(rockchip);
+
+	/*
+	 * PCIe requires the refclk to be stable for 100µs prior to releasing
+	 * PERST. See table 2-4 in section 2.6.2 AC Specifications of the PCI
+	 * Express Card Electromechanical Specification, 1.1. However, we don't
+	 * know whether the refclk comes from the RC's PHY or an external
+	 * oscillator. If it comes from the RC, enabling the LTSSM is the right
+	 * point to release #PERST. Use a generous delay rather than the bare
+	 * 100us, since we don't know how long the device needs to come out of
+	 * reset.
+	 */
+ msleep(100);
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
+
+ return 0;
+}
+
+static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ struct device *dev = rockchip->pci.dev;
+ u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ int irq, ret;
+
+ irq = of_irq_get_byname(dev->of_node, "legacy");
+ if (irq < 0)
+ return irq;
+
+ ret = rockchip_pcie_init_irq_domain(rockchip);
+ if (ret < 0)
+ dev_err(dev, "failed to init irq domain\n");
+
+ irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler,
+ rockchip);
+
+ /* LTSSM enable control mode */
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
+ .host_init = rockchip_pcie_host_init,
+};
+
+static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (ret < 0)
+ return ret;
+
+ rockchip->clk_cnt = ret;
+
+ return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+}
+
+static int rockchip_pcie_resource_get(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
+
+ rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->rst_gpio))
+ return PTR_ERR(rockchip->rst_gpio);
+
+ rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(rockchip->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
+ return 0;
+}
+
+static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ rockchip->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(rockchip->phy))
+ return dev_err_probe(dev, PTR_ERR(rockchip->phy),
+ "missing PHY\n");
+
+ ret = phy_init(rockchip->phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(rockchip->phy);
+ if (ret)
+ phy_exit(rockchip->phy);
+
+ return ret;
+}
+
+static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
+{
+ phy_exit(rockchip->phy);
+ phy_power_off(rockchip->phy);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = rockchip_pcie_link_up,
+ .start_link = rockchip_pcie_start_link,
+};
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip;
+ struct dw_pcie_rp *pp;
+ int ret;
+
+ rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
+ if (!rockchip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rockchip);
+
+ rockchip->pci.dev = dev;
+ rockchip->pci.ops = &dw_pcie_ops;
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+
+ ret = rockchip_pcie_resource_get(pdev, rockchip);
+ if (ret)
+ return ret;
+
+ ret = reset_control_assert(rockchip->rst);
+ if (ret)
+ return ret;
+
+	/* DON'T MOVE ME: must be enabled before PHY init */
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(rockchip->vpcie3v3),
+ "failed to get vpcie3v3 regulator\n");
+ rockchip->vpcie3v3 = NULL;
+ } else {
+ ret = regulator_enable(rockchip->vpcie3v3);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie3v3 regulator\n");
+ return ret;
+ }
+ }
+
+ ret = rockchip_pcie_phy_init(rockchip);
+ if (ret)
+ goto disable_regulator;
+
+ ret = reset_control_deassert(rockchip->rst);
+ if (ret)
+ goto deinit_phy;
+
+ ret = rockchip_pcie_clk_init(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ ret = dw_pcie_host_init(pp);
+ if (!ret)
+ return 0;
+
+ clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
+deinit_phy:
+ rockchip_pcie_phy_deinit(rockchip);
+disable_regulator:
+ if (rockchip->vpcie3v3)
+ regulator_disable(rockchip->vpcie3v3);
+
+ return ret;
+}
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3568-pcie", },
+ {},
+};
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-dw-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rockchip_pcie_probe,
+};
+builtin_platform_driver(rockchip_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
new file mode 100644
index 0000000000..1e9b44b8bb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FU740 DesignWare PCIe Controller integration
+ * Copyright (C) 2019-2021 SiFive, Inc.
+ * Paul Walmsley
+ * Greentime Hu
+ *
+ * Based in part on the i.MX6 PCIe host controller shim which is:
+ *
+ * Copyright (C) 2013 Kosagi
+ * https://www.kosagi.com
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_fu740_pcie(x) dev_get_drvdata((x)->dev)
+
+struct fu740_pcie {
+ struct dw_pcie pci;
+ void __iomem *mgmt_base;
+ struct gpio_desc *reset;
+ struct gpio_desc *pwren;
+ struct clk *pcie_aux;
+ struct reset_control *rst;
+};
+
+#define SIFIVE_DEVICESRESETREG 0x28
+
+#define PCIEX8MGMT_PERST_N 0x0
+#define PCIEX8MGMT_APP_LTSSM_ENABLE 0x10
+#define PCIEX8MGMT_APP_HOLD_PHY_RST 0x18
+#define PCIEX8MGMT_DEVICE_TYPE 0x708
+#define PCIEX8MGMT_PHY0_CR_PARA_ADDR 0x860
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_EN 0x870
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_DATA 0x878
+#define PCIEX8MGMT_PHY0_CR_PARA_SEL 0x880
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_DATA 0x888
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_EN 0x890
+#define PCIEX8MGMT_PHY0_CR_PARA_ACK 0x898
+#define PCIEX8MGMT_PHY1_CR_PARA_ADDR 0x8a0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_EN 0x8b0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_DATA 0x8b8
+#define PCIEX8MGMT_PHY1_CR_PARA_SEL 0x8c0
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_DATA 0x8c8
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_EN 0x8d0
+#define PCIEX8MGMT_PHY1_CR_PARA_ACK 0x8d8
+
+#define PCIEX8MGMT_PHY_CDR_TRACK_EN BIT(0)
+#define PCIEX8MGMT_PHY_LOS_THRSHLD BIT(5)
+#define PCIEX8MGMT_PHY_TERM_EN BIT(9)
+#define PCIEX8MGMT_PHY_TERM_ACDC BIT(10)
+#define PCIEX8MGMT_PHY_EN BIT(11)
+#define PCIEX8MGMT_PHY_INIT_VAL (PCIEX8MGMT_PHY_CDR_TRACK_EN|\
+ PCIEX8MGMT_PHY_LOS_THRSHLD|\
+ PCIEX8MGMT_PHY_TERM_EN|\
+ PCIEX8MGMT_PHY_TERM_ACDC|\
+ PCIEX8MGMT_PHY_EN)
+
+#define PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 0x1008
+#define PCIEX8MGMT_PHY_LANE_OFF 0x100
+#define PCIEX8MGMT_PHY_LANE0_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 0)
+#define PCIEX8MGMT_PHY_LANE1_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 1)
+#define PCIEX8MGMT_PHY_LANE2_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 2)
+#define PCIEX8MGMT_PHY_LANE3_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 3)
+
+static void fu740_pcie_assert_reset(struct fu740_pcie *afp)
+{
+ /* Assert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 0);
+ /* Assert controller PERST_N */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+}
+
+static void fu740_pcie_deassert_reset(struct fu740_pcie *afp)
+{
+ /* Deassert controller PERST_N */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+ /* Deassert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 1);
+}
+
+static void fu740_pcie_power_on(struct fu740_pcie *afp)
+{
+ gpiod_set_value_cansleep(afp->pwren, 1);
+ /*
+ * Ensure that PERST has been asserted for at least 100 ms.
+ * Section 2.2 of PCI Express Card Electromechanical Specification
+ * Revision 3.0
+ */
+ msleep(100);
+}
+
+static void fu740_pcie_drive_reset(struct fu740_pcie *afp)
+{
+ fu740_pcie_assert_reset(afp);
+ fu740_pcie_power_on(afp);
+ fu740_pcie_deassert_reset(afp);
+}
+
+static void fu740_phyregwrite(const uint8_t phy, const uint16_t addr,
+ const uint16_t wrdata, struct fu740_pcie *afp)
+{
+ struct device *dev = afp->pci.dev;
+ void __iomem *phy_cr_para_addr;
+ void __iomem *phy_cr_para_wr_data;
+ void __iomem *phy_cr_para_wr_en;
+ void __iomem *phy_cr_para_ack;
+ int ret, val;
+
+ /* Setup */
+ if (phy) {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ACK;
+ } else {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ACK;
+ }
+
+ writel_relaxed(addr, phy_cr_para_addr);
+ writel_relaxed(wrdata, phy_cr_para_wr_data);
+ writel_relaxed(1, phy_cr_para_wr_en);
+
+ /* Wait for wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for wait_idle state failed!\n");
+
+ /* Clear */
+ writel_relaxed(0, phy_cr_para_wr_en);
+
+ /* Wait for ~wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, !val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for !wait_idle state failed!\n");
+}
+
+static void fu740_pcie_init_phy(struct fu740_pcie *afp)
+{
+ /* Enable phy cr_para_sel interfaces */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_SEL);
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_SEL);
+
+ /*
+ * Wait 10 cr_para cycles to guarantee that the registers are ready
+ * to be edited.
+ */
+ ndelay(10);
+
+ /* Set PHY AC termination mode */
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+}
+
+static int fu740_pcie_start_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+ struct fu740_pcie *afp = dev_get_drvdata(dev);
+ u8 cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ int ret;
+ u32 orig, tmp;
+
+ /*
+ * Force 2.5GT/s when starting the link, due to some devices not
+ * probing at higher speeds. This happens with the PCIe switch
+ * on the Unmatched board when U-Boot has not initialised the PCIe.
+ * The fix in U-Boot is to force 2.5GT/s, which then gets cleared
+ * by the soft reset done by this driver.
+ */
+ dev_dbg(dev, "cap_exp at %x\n", cap_exp);
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+ orig = tmp & PCI_EXP_LNKCAP_SLS;
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
+
+ /* Enable LTSSM */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "error: link did not start\n");
+ goto err;
+ }
+
+ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+ if ((tmp & PCI_EXP_LNKCAP_SLS) != orig) {
+ dev_dbg(dev, "changing speed back to original\n");
+
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= orig;
+ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
+
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "error: link did not start at new speed\n");
+ goto err;
+ }
+ }
+
+ ret = 0;
+err:
+ WARN_ON(ret); /* we assume that errors will be very rare */
+ dw_pcie_dbi_ro_wr_dis(pci);
+ return ret;
+}
+
+static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct fu740_pcie *afp = to_fu740_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* Power on reset */
+ fu740_pcie_drive_reset(afp);
+
+ /* Enable pcieauxclk */
+ ret = clk_prepare_enable(afp->pcie_aux);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ return ret;
+ }
+
+ /*
+ * Assert hold_phy_rst (hold the controller LTSSM in reset after
+ * power_up_rst_n for register programming with cr_para)
+ */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+
+ /* Deassert power_up_rst_n */
+ ret = reset_control_deassert(afp->rst);
+ if (ret) {
+ dev_err(dev, "unable to deassert pcie_power_up_rst_n\n");
+ return ret;
+ }
+
+ fu740_pcie_init_phy(afp);
+
+ /* Disable pcieauxclk */
+ clk_disable_unprepare(afp->pcie_aux);
+ /* Clear hold_phy_rst */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+ /* Enable pcieauxclk */
+ clk_prepare_enable(afp->pcie_aux);
+ /* Set RC mode */
+ writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops fu740_pcie_host_ops = {
+ .host_init = fu740_pcie_host_init,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = fu740_pcie_start_link,
+};
+
+static int fu740_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct fu740_pcie *afp;
+
+ afp = devm_kzalloc(dev, sizeof(*afp), GFP_KERNEL);
+ if (!afp)
+ return -ENOMEM;
+ pci = &afp->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &fu740_pcie_host_ops;
+ pci->pp.num_vectors = MAX_MSI_IRQS;
+
+ /* SiFive specific region: mgmt */
+ afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt");
+ if (IS_ERR(afp->mgmt_base))
+ return PTR_ERR(afp->mgmt_base);
+
+ /* Fetch GPIOs */
+ afp->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->reset))
+ return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n");
+
+ afp->pwren = devm_gpiod_get_optional(dev, "pwren", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->pwren))
+ return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n");
+
+ /* Fetch clocks */
+ afp->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(afp->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(afp->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+
+ /* Fetch reset */
+ afp->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(afp->rst))
+ return dev_err_probe(dev, PTR_ERR(afp->rst), "unable to get reset\n");
+
+ platform_set_drvdata(pdev, afp);
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static void fu740_pcie_shutdown(struct platform_device *pdev)
+{
+ struct fu740_pcie *afp = platform_get_drvdata(pdev);
+
+ /* Bring down link, so bootloader gets clean state in case of reboot */
+ fu740_pcie_assert_reset(afp);
+}
+
+static const struct of_device_id fu740_pcie_of_match[] = {
+ { .compatible = "sifive,fu740-pcie", },
+ {},
+};
+
+static struct platform_driver fu740_pcie_driver = {
+ .driver = {
+ .name = "fu740-pcie",
+ .of_match_table = fu740_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fu740_pcie_probe,
+ .shutdown = fu740_pcie_shutdown,
+};
+
+builtin_platform_driver(fu740_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
new file mode 100644
index 0000000000..8904b5b85e
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for HiSilicon SoCs
+ *
+ * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Zhou Wang <wangzhou1@hisilicon.com>
+ * Dacai Zhu <zhudacai@hisilicon.com>
+ * Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ */
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
+struct hisi_pcie {
+ void __iomem *reg_base;
+};
+
+static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int dev = PCI_SLOT(devfn);
+
+ if (bus->number == cfg->busr.start) {
+ /* access only one slot on each root port */
+ if (dev > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return pci_generic_config_read32(bus, devfn, where,
+ size, val);
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int dev = PCI_SLOT(devfn);
+
+ if (bus->number == cfg->busr.start) {
+ /* access only one slot on each root port */
+ if (dev > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return pci_generic_config_write32(bus, devfn, where,
+ size, val);
+ }
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct hisi_pcie *pcie = cfg->priv;
+
+ if (bus->number == cfg->busr.start)
+ return pcie->reg_base + where;
+ else
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int hisi_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct hisi_pcie *pcie;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct resource *res;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ /*
+ * Retrieve RC base and size from a HISI0081 device with _UID
+ * matching our segment.
+ */
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc base address\n");
+ return -ENOMEM;
+ }
+
+ pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!pcie->reg_base)
+ return -ENOMEM;
+
+ cfg->priv = pcie;
+ return 0;
+}
+
+const struct pci_ecam_ops hisi_pcie_ops = {
+ .init = hisi_pcie_init,
+ .pci_ops = {
+ .map_bus = hisi_pcie_map_bus,
+ .read = hisi_pcie_rd_conf,
+ .write = hisi_pcie_wr_conf,
+ }
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HISI
+
+static int hisi_pcie_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct hisi_pcie *pcie;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+		dev_err(dev, "missing \"reg[1]\" property\n");
+ return -EINVAL;
+ }
+
+ pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!pcie->reg_base)
+ return -ENOMEM;
+
+ cfg->priv = pcie;
+ return 0;
+}
+
+static const struct pci_ecam_ops hisi_pcie_platform_ops = {
+ .init = hisi_pcie_platform_init,
+ .pci_ops = {
+ .map_bus = hisi_pcie_map_bus,
+ .read = hisi_pcie_rd_conf,
+ .write = hisi_pcie_wr_conf,
+ }
+};
+
+static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
+ {
+ .compatible = "hisilicon,hip06-pcie-ecam",
+ .data = &hisi_pcie_platform_ops,
+ },
+ {
+ .compatible = "hisilicon,hip07-pcie-ecam",
+ .data = &hisi_pcie_platform_ops,
+ },
+ {},
+};
+
+static struct platform_driver hisi_pcie_almost_ecam_driver = {
+ .probe = pci_host_common_probe,
+ .driver = {
+ .name = "hisi-pcie-almost-ecam",
+ .of_match_table = hisi_pcie_almost_ecam_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(hisi_pcie_almost_ecam_driver);
+
+#endif
+#endif
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
new file mode 100644
index 0000000000..fd484cc7c4
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for HiSilicon STB SoCs
+ *
+ * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Ruqiang Ju <juruqiang@hisilicon.com>
+ * Jianguo Sun <sunjianguo1@huawei.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_histb_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_SYS_CTRL0 0x0000
+#define PCIE_SYS_CTRL1 0x0004
+#define PCIE_SYS_CTRL7 0x001C
+#define PCIE_SYS_CTRL13 0x0034
+#define PCIE_SYS_CTRL15 0x003C
+#define PCIE_SYS_CTRL16 0x0040
+#define PCIE_SYS_CTRL17 0x0044
+
+#define PCIE_SYS_STAT0 0x0100
+#define PCIE_SYS_STAT4 0x0110
+
+#define PCIE_RDLH_LINK_UP BIT(5)
+#define PCIE_XMLH_LINK_UP BIT(15)
+#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
+#define PCIE_APP_LTSSM_ENABLE BIT(11)
+
+#define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28)
+#define PCIE_WM_EP 0
+#define PCIE_WM_LEGACY BIT(1)
+#define PCIE_WM_RC BIT(30)
+
+#define PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
+#define PCIE_LTSSM_STATE_ACTIVE 0x11
+
+struct histb_pcie {
+ struct dw_pcie *pci;
+ struct clk *aux_clk;
+ struct clk *pipe_clk;
+ struct clk *sys_clk;
+ struct clk *bus_clk;
+ struct phy *phy;
+ struct reset_control *soft_reset;
+ struct reset_control *sys_reset;
+ struct reset_control *bus_reset;
+ void __iomem *ctrl;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vpcie;
+};
+
+static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg)
+{
+ return readl(histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
+{
+ writel(val, histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 val;
+
+ val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ if (enable)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
+}
+
+static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 val;
+
+ val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1);
+ if (enable)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val);
+}
+
+static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ u32 val;
+
+ histb_pcie_dbi_r_mode(&pci->pp, true);
+ dw_pcie_read(base + reg, size, &val);
+ histb_pcie_dbi_r_mode(&pci->pp, false);
+
+ return val;
+}
+
+static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ histb_pcie_dbi_w_mode(&pci->pp, true);
+ dw_pcie_write(base + reg, size, val);
+ histb_pcie_dbi_w_mode(&pci->pp, false);
+}
+
+static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int histb_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops histb_pci_ops = {
+ .read = histb_pcie_rd_own_conf,
+ .write = histb_pcie_wr_own_conf,
+};
+
+static int histb_pcie_link_up(struct dw_pcie *pci)
+{
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+ u32 status;
+
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
+ status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
+ status &= PCIE_LTSSM_STATE_MASK;
+ if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE))
+ return 1;
+
+ return 0;
+}
+
+static int histb_pcie_start_link(struct dw_pcie *pci)
+{
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+
+ /* assert LTSSM enable */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
+ regval |= PCIE_APP_LTSSM_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
+
+ return 0;
+}
+
+static int histb_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+
+ pp->bridge->ops = &histb_pci_ops;
+
+ /* PCIe RC work mode */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ regval &= ~PCIE_DEVICE_TYPE_MASK;
+ regval |= PCIE_WM_RC;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops histb_pcie_host_ops = {
+ .host_init = histb_pcie_host_init,
+};
+
+static void histb_pcie_host_disable(struct histb_pcie *hipcie)
+{
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_assert(hipcie->bus_reset);
+
+ clk_disable_unprepare(hipcie->aux_clk);
+ clk_disable_unprepare(hipcie->pipe_clk);
+ clk_disable_unprepare(hipcie->sys_clk);
+ clk_disable_unprepare(hipcie->bus_clk);
+
+ if (hipcie->reset_gpio)
+ gpiod_set_value_cansleep(hipcie->reset_gpio, 1);
+
+ if (hipcie->vpcie)
+ regulator_disable(hipcie->vpcie);
+}
+
+static int histb_pcie_host_enable(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+	/* Power on the PCIe device, if a regulator was provided */
+ if (hipcie->vpcie) {
+ ret = regulator_enable(hipcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (hipcie->reset_gpio)
+ gpiod_set_value_cansleep(hipcie->reset_gpio, 0);
+
+ ret = clk_prepare_enable(hipcie->bus_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable bus clk\n");
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->sys_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable sys clk\n");
+ goto err_sys_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clk\n");
+ goto err_pipe_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clk\n");
+ goto err_aux_clk;
+ }
+
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_deassert(hipcie->soft_reset);
+
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_deassert(hipcie->sys_reset);
+
+ reset_control_assert(hipcie->bus_reset);
+ reset_control_deassert(hipcie->bus_reset);
+
+ return 0;
+
+err_aux_clk:
+ clk_disable_unprepare(hipcie->pipe_clk);
+err_pipe_clk:
+ clk_disable_unprepare(hipcie->sys_clk);
+err_sys_clk:
+ clk_disable_unprepare(hipcie->bus_clk);
+err_bus_clk:
+ if (hipcie->vpcie)
+ regulator_disable(hipcie->vpcie);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .read_dbi = histb_pcie_read_dbi,
+ .write_dbi = histb_pcie_write_dbi,
+ .link_up = histb_pcie_link_up,
+ .start_link = histb_pcie_start_link,
+};
+
+static int histb_pcie_probe(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie;
+ struct dw_pcie *pci;
+ struct dw_pcie_rp *pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
+ if (!hipcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ hipcie->pci = pci;
+ pp = &pci->pp;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ hipcie->ctrl = devm_platform_ioremap_resource_byname(pdev, "control");
+ if (IS_ERR(hipcie->ctrl)) {
+ dev_err(dev, "cannot get control reg base\n");
+ return PTR_ERR(hipcie->ctrl);
+ }
+
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc-dbi");
+ if (IS_ERR(pci->dbi_base)) {
+ dev_err(dev, "cannot get rc-dbi base\n");
+ return PTR_ERR(pci->dbi_base);
+ }
+
+ hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
+ if (IS_ERR(hipcie->vpcie)) {
+ if (PTR_ERR(hipcie->vpcie) != -ENODEV)
+ return PTR_ERR(hipcie->vpcie);
+ hipcie->vpcie = NULL;
+ }
+
+ hipcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(hipcie->reset_gpio);
+ if (ret) {
+ dev_err(dev, "unable to request reset gpio: %d\n", ret);
+ return ret;
+ }
+
+ ret = gpiod_set_consumer_name(hipcie->reset_gpio,
+ "PCIe device power control");
+ if (ret) {
+ dev_err(dev, "unable to set reset gpio name: %d\n", ret);
+ return ret;
+ }
+
+ hipcie->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(hipcie->aux_clk)) {
+ dev_err(dev, "Failed to get PCIe aux clk\n");
+ return PTR_ERR(hipcie->aux_clk);
+ }
+
+ hipcie->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(hipcie->pipe_clk)) {
+ dev_err(dev, "Failed to get PCIe pipe clk\n");
+ return PTR_ERR(hipcie->pipe_clk);
+ }
+
+ hipcie->sys_clk = devm_clk_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_clk)) {
+ dev_err(dev, "Failed to get PCIEe sys clk\n");
+ return PTR_ERR(hipcie->sys_clk);
+ }
+
+ hipcie->bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_clk)) {
+ dev_err(dev, "Failed to get PCIe bus clk\n");
+ return PTR_ERR(hipcie->bus_clk);
+ }
+
+ hipcie->soft_reset = devm_reset_control_get(dev, "soft");
+ if (IS_ERR(hipcie->soft_reset)) {
+ dev_err(dev, "couldn't get soft reset\n");
+ return PTR_ERR(hipcie->soft_reset);
+ }
+
+ hipcie->sys_reset = devm_reset_control_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_reset)) {
+ dev_err(dev, "couldn't get sys reset\n");
+ return PTR_ERR(hipcie->sys_reset);
+ }
+
+ hipcie->bus_reset = devm_reset_control_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_reset)) {
+ dev_err(dev, "couldn't get bus reset\n");
+ return PTR_ERR(hipcie->bus_reset);
+ }
+
+ hipcie->phy = devm_phy_get(dev, "phy");
+ if (IS_ERR(hipcie->phy)) {
+ dev_info(dev, "no pcie-phy found\n");
+ hipcie->phy = NULL;
+ /*
+ * Fall through here: if no pcie-phy is found, the PHY
+ * initialization must already have been done during boot.
+ */
+ } else {
+ phy_init(hipcie->phy);
+ }
+
+ pp->ops = &histb_pcie_host_ops;
+
+ platform_set_drvdata(pdev, hipcie);
+
+ ret = histb_pcie_host_enable(pp);
+ if (ret) {
+ dev_err(dev, "failed to enable host\n");
+ return ret;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void histb_pcie_remove(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie = platform_get_drvdata(pdev);
+
+ histb_pcie_host_disable(hipcie);
+
+ if (hipcie->phy)
+ phy_exit(hipcie->phy);
+}
+
+static const struct of_device_id histb_pcie_of_match[] = {
+ { .compatible = "hisilicon,hi3798cv200-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
+
+static struct platform_driver histb_pcie_platform_driver = {
+ .probe = histb_pcie_probe,
+ .remove_new = histb_pcie_remove,
+ .driver = {
+ .name = "histb-pcie",
+ .of_match_table = histb_pcie_of_match,
+ },
+};
+module_platform_driver(histb_pcie_platform_driver);
+
+MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
new file mode 100644
index 0000000000..c9c93524e0
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Intel Gateway SoCs
+ *
+ * Copyright (c) 2019 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci_regs.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reset.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define PORT_AFR_N_FTS_GEN12_DFT (SZ_128 - 1)
+#define PORT_AFR_N_FTS_GEN3 180
+#define PORT_AFR_N_FTS_GEN4 196
+
+/* PCIe Application logic Registers */
+#define PCIE_APP_CCR 0x10
+#define PCIE_APP_CCR_LTSSM_ENABLE BIT(0)
+
+#define PCIE_APP_MSG_CR 0x30
+#define PCIE_APP_MSG_XMT_PM_TURNOFF BIT(0)
+
+#define PCIE_APP_PMC 0x44
+#define PCIE_APP_PMC_IN_L2 BIT(20)
+
+#define PCIE_APP_IRNEN 0xF4
+#define PCIE_APP_IRNCR 0xF8
+#define PCIE_APP_IRN_AER_REPORT BIT(0)
+#define PCIE_APP_IRN_PME BIT(2)
+#define PCIE_APP_IRN_RX_VDM_MSG BIT(4)
+#define PCIE_APP_IRN_PM_TO_ACK BIT(9)
+#define PCIE_APP_IRN_LINK_AUTO_BW_STAT BIT(11)
+#define PCIE_APP_IRN_BW_MGT BIT(12)
+#define PCIE_APP_IRN_INTA BIT(13)
+#define PCIE_APP_IRN_INTB BIT(14)
+#define PCIE_APP_IRN_INTC BIT(15)
+#define PCIE_APP_IRN_INTD BIT(16)
+#define PCIE_APP_IRN_MSG_LTR BIT(18)
+#define PCIE_APP_IRN_SYS_ERR_RC BIT(29)
+#define PCIE_APP_INTX_OFST 12
+
+#define PCIE_APP_IRN_INT \
+ (PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
+ PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
+ PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
+ PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
+ PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
+ PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
+
+#define BUS_IATU_OFFSET SZ_256M
+#define RESET_INTERVAL_MS 100
+
+struct intel_pcie {
+ struct dw_pcie pci;
+ void __iomem *app_base;
+ struct gpio_desc *reset_gpio;
+ u32 rst_intrvl;
+ struct clk *core_clk;
+ struct reset_control *core_rst;
+ struct phy *phy;
+};
+
+static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
+{
+ u32 old;
+
+ old = readl(base + ofs);
+ val = (old & ~mask) | (val & mask);
+
+ if (val != old)
+ writel(val, base + ofs);
+}
+
+static inline void pcie_app_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
+{
+ writel(val, pcie->app_base + ofs);
+}
+
+static void pcie_app_wr_mask(struct intel_pcie *pcie, u32 ofs,
+ u32 mask, u32 val)
+{
+ pcie_update_bits(pcie->app_base, ofs, mask, val);
+}
+
+static inline u32 pcie_rc_cfg_rd(struct intel_pcie *pcie, u32 ofs)
+{
+ return dw_pcie_readl_dbi(&pcie->pci, ofs);
+}
+
+static inline void pcie_rc_cfg_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
+{
+ dw_pcie_writel_dbi(&pcie->pci, ofs, val);
+}
+
+static void pcie_rc_cfg_wr_mask(struct intel_pcie *pcie, u32 ofs,
+ u32 mask, u32 val)
+{
+ pcie_update_bits(pcie->pci.dbi_base, ofs, mask, val);
+}
+
+static void intel_pcie_ltssm_enable(struct intel_pcie *pcie)
+{
+ pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
+ PCIE_APP_CCR_LTSSM_ENABLE);
+}
+
+static void intel_pcie_ltssm_disable(struct intel_pcie *pcie)
+{
+ pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
+}
+
+static void intel_pcie_link_setup(struct intel_pcie *pcie)
+{
+ u32 val;
+ u8 offset = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP);
+
+ val = pcie_rc_cfg_rd(pcie, offset + PCI_EXP_LNKCTL);
+
+ val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
+ pcie_rc_cfg_wr(pcie, offset + PCI_EXP_LNKCTL, val);
+}
+
+static void intel_pcie_init_n_fts(struct dw_pcie *pci)
+{
+ switch (pci->link_gen) {
+ case 3:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
+ break;
+ case 4:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN4;
+ break;
+ default:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN12_DFT;
+ break;
+ }
+ pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
+}
+
+static int intel_pcie_ep_rst_init(struct intel_pcie *pcie)
+{
+ struct device *dev = pcie->pci.dev;
+ int ret;
+
+ pcie->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset_gpio)) {
+ ret = PTR_ERR(pcie->reset_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* Make initial reset last for 100us */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static void intel_pcie_core_rst_assert(struct intel_pcie *pcie)
+{
+ reset_control_assert(pcie->core_rst);
+}
+
+static void intel_pcie_core_rst_deassert(struct intel_pcie *pcie)
+{
+ /*
+ * One microsecond delay to make sure the reset pulse is
+ * wide enough so that the core reset is clean.
+ */
+ udelay(1);
+ reset_control_deassert(pcie->core_rst);
+
+ /*
+ * On some SoCs the core reset also resets the PHY, so more delay is
+ * needed to make sure the reset process is done.
+ */
+ usleep_range(1000, 2000);
+}
+
+static void intel_pcie_device_rst_assert(struct intel_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+}
+
+static void intel_pcie_device_rst_deassert(struct intel_pcie *pcie)
+{
+ msleep(pcie->rst_intrvl);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+}
+
+static void intel_pcie_core_irq_disable(struct intel_pcie *pcie)
+{
+ pcie_app_wr(pcie, PCIE_APP_IRNEN, 0);
+ pcie_app_wr(pcie, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
+}
+
+static int intel_pcie_get_resources(struct platform_device *pdev)
+{
+ struct intel_pcie *pcie = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ pcie->core_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->core_clk)) {
+ ret = PTR_ERR(pcie->core_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clks: %d\n", ret);
+ return ret;
+ }
+
+ pcie->core_rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(pcie->core_rst)) {
+ ret = PTR_ERR(pcie->core_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get resets: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "reset-assert-ms",
+ &pcie->rst_intrvl);
+ if (ret)
+ pcie->rst_intrvl = RESET_INTERVAL_MS;
+
+ pcie->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(pcie->app_base))
+ return PTR_ERR(pcie->app_base);
+
+ pcie->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_pcie_wait_l2(struct intel_pcie *pcie)
+{
+ u32 value;
+ int ret;
+ struct dw_pcie *pci = &pcie->pci;
+
+ if (pci->link_gen < 3)
+ return 0;
+
+ /* Send PME_TURN_OFF message */
+ pcie_app_wr_mask(pcie, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
+ PCIE_APP_MSG_XMT_PM_TURNOFF);
+
+ /* Read PMC status and wait for falling into L2 link state */
+ ret = readl_poll_timeout(pcie->app_base + PCIE_APP_PMC, value,
+ value & PCIE_APP_PMC_IN_L2, 20,
+ jiffies_to_usecs(5 * HZ));
+ if (ret)
+ dev_err(pcie->pci.dev, "PCIe link enter L2 timeout!\n");
+
+ return ret;
+}
+
+static void intel_pcie_turn_off(struct intel_pcie *pcie)
+{
+ if (dw_pcie_link_up(&pcie->pci))
+ intel_pcie_wait_l2(pcie);
+
+ /* Put endpoint device in reset state */
+ intel_pcie_device_rst_assert(pcie);
+ pcie_rc_cfg_wr_mask(pcie, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
+}
+
+static int intel_pcie_host_setup(struct intel_pcie *pcie)
+{
+ int ret;
+ struct dw_pcie *pci = &pcie->pci;
+
+ intel_pcie_core_rst_assert(pcie);
+ intel_pcie_device_rst_assert(pcie);
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return ret;
+
+ intel_pcie_core_rst_deassert(pcie);
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(pcie->pci.dev, "Core clock enable failed: %d\n", ret);
+ goto clk_err;
+ }
+
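+ /* The iATU register space sits at a fixed 0xC0000 offset from the DBI base */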
+ pci->atu_base = pci->dbi_base + 0xC0000;
+
+ intel_pcie_ltssm_disable(pcie);
+ intel_pcie_link_setup(pcie);
+ intel_pcie_init_n_fts(pci);
+
+ ret = dw_pcie_setup_rc(&pci->pp);
+ if (ret)
+ goto app_init_err;
+
+ dw_pcie_upconfig_setup(pci);
+
+ intel_pcie_device_rst_deassert(pcie);
+ intel_pcie_ltssm_enable(pcie);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto app_init_err;
+
+ /* Enable integrated interrupts */
+ pcie_app_wr_mask(pcie, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
+ PCIE_APP_IRN_INT);
+
+ return 0;
+
+app_init_err:
+ clk_disable_unprepare(pcie->core_clk);
+clk_err:
+ intel_pcie_core_rst_assert(pcie);
+ phy_exit(pcie->phy);
+
+ return ret;
+}
+
+static void __intel_pcie_remove(struct intel_pcie *pcie)
+{
+ intel_pcie_core_irq_disable(pcie);
+ intel_pcie_turn_off(pcie);
+ clk_disable_unprepare(pcie->core_clk);
+ intel_pcie_core_rst_assert(pcie);
+ phy_exit(pcie->phy);
+}
+
+static void intel_pcie_remove(struct platform_device *pdev)
+{
+ struct intel_pcie *pcie = platform_get_drvdata(pdev);
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
+
+ dw_pcie_host_deinit(pp);
+ __intel_pcie_remove(pcie);
+}
+
+static int intel_pcie_suspend_noirq(struct device *dev)
+{
+ struct intel_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ intel_pcie_core_irq_disable(pcie);
+ ret = intel_pcie_wait_l2(pcie);
+ if (ret)
+ return ret;
+
+ phy_exit(pcie->phy);
+ clk_disable_unprepare(pcie->core_clk);
+ return ret;
+}
+
+static int intel_pcie_resume_noirq(struct device *dev)
+{
+ struct intel_pcie *pcie = dev_get_drvdata(dev);
+
+ return intel_pcie_host_setup(pcie);
+}
+
+static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct intel_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ return intel_pcie_host_setup(pcie);
+}
+
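+/*
+ * Outbound CPU addresses are offset by BUS_IATU_OFFSET (256 MiB) before
+ * being programmed into the iATU.
+ */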
+static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
+{
+ return cpu_addr + BUS_IATU_OFFSET;
+}
+
+static const struct dw_pcie_ops intel_pcie_ops = {
+ .cpu_addr_fixup = intel_pcie_cpu_addr,
+};
+
+static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
+ .host_init = intel_pcie_rc_init,
+};
+
+static int intel_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_pcie *pcie;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pcie);
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pp = &pci->pp;
+
+ ret = intel_pcie_get_resources(pdev);
+ if (ret)
+ return ret;
+
+ ret = intel_pcie_ep_rst_init(pcie);
+ if (ret)
+ return ret;
+
+ pci->ops = &intel_pcie_ops;
+ pp->ops = &intel_pcie_dw_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "Cannot initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops intel_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
+ intel_pcie_resume_noirq)
+};
+
+static const struct of_device_id of_intel_pcie_match[] = {
+ { .compatible = "intel,lgm-pcie" },
+ {}
+};
+
+static struct platform_driver intel_pcie_driver = {
+ .probe = intel_pcie_probe,
+ .remove_new = intel_pcie_remove,
+ .driver = {
+ .name = "intel-gw-pcie",
+ .of_match_table = of_intel_pcie_match,
+ .pm = &intel_pcie_pm_ops,
+ },
+};
+builtin_platform_driver(intel_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
new file mode 100644
index 0000000000..289bff99d7
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe controller driver for Intel Keem Bay
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "pcie-designware.h"
+
+/* PCIE_REGS_APB_SLV Registers */
+#define PCIE_REGS_PCIE_CFG 0x0004
+#define PCIE_DEVICE_TYPE BIT(8)
+#define PCIE_RSTN BIT(0)
+#define PCIE_REGS_PCIE_APP_CNTRL 0x0008
+#define APP_LTSSM_ENABLE BIT(0)
+#define PCIE_REGS_INTERRUPT_ENABLE 0x0028
+#define MSI_CTRL_INT_EN BIT(8)
+#define EDMA_INT_EN GENMASK(7, 0)
+#define PCIE_REGS_INTERRUPT_STATUS 0x002c
+#define MSI_CTRL_INT BIT(8)
+#define PCIE_REGS_PCIE_SII_PM_STATE 0x00b0
+#define SMLH_LINK_UP BIT(19)
+#define RDLH_LINK_UP BIT(8)
+#define PCIE_REGS_PCIE_SII_LINK_UP (SMLH_LINK_UP | RDLH_LINK_UP)
+#define PCIE_REGS_PCIE_PHY_CNTL 0x0164
+#define PHY0_SRAM_BYPASS BIT(8)
+#define PCIE_REGS_PCIE_PHY_STAT 0x0168
+#define PHY0_MPLLA_STATE BIT(1)
+#define PCIE_REGS_LJPLL_STA 0x016c
+#define LJPLL_LOCK BIT(0)
+#define PCIE_REGS_LJPLL_CNTRL_0 0x0170
+#define LJPLL_EN BIT(29)
+#define LJPLL_FOUT_EN GENMASK(24, 21)
+#define PCIE_REGS_LJPLL_CNTRL_2 0x0178
+#define LJPLL_REF_DIV GENMASK(17, 12)
+#define LJPLL_FB_DIV GENMASK(11, 0)
+#define PCIE_REGS_LJPLL_CNTRL_3 0x017c
+#define LJPLL_POST_DIV3A GENMASK(24, 22)
+#define LJPLL_POST_DIV2A GENMASK(18, 16)
+
+#define PERST_DELAY_US 1000
+#define AUX_CLK_RATE_HZ 24000000
+
+struct keembay_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ enum dw_pcie_device_mode mode;
+
+ struct clk *clk_master;
+ struct clk *clk_aux;
+ struct gpio_desc *reset;
+};
+
+struct keembay_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static void keembay_ep_reset_assert(struct keembay_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_ep_reset_deassert(struct keembay_pcie *pcie)
+{
+ /*
+ * Ensure that PERST# is asserted for a minimum of 100ms.
+ *
+ * For more details, refer to PCI Express Card Electromechanical
+ * Specification Revision 1.1, Table-2.4.
+ */
+ msleep(100);
+
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
+{
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+ if (enable)
+ val |= APP_LTSSM_ENABLE;
+ else
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+}
+
+static int keembay_pcie_link_up(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_SII_PM_STATE);
+
+ return (val & PCIE_REGS_PCIE_SII_LINK_UP) == PCIE_REGS_PCIE_SII_LINK_UP;
+}
+
+static int keembay_pcie_start_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+ int ret;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE)
+ return 0;
+
+ keembay_pcie_ltssm_set(pcie, false);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_PCIE_PHY_STAT,
+ val, val & PHY0_MPLLA_STATE, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(pci->dev, "MPLLA is not locked\n");
+ return ret;
+ }
+
+ keembay_pcie_ltssm_set(pcie, true);
+
+ return 0;
+}
+
+static void keembay_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ keembay_pcie_ltssm_set(pcie, false);
+}
+
+static const struct dw_pcie_ops keembay_pcie_ops = {
+ .link_up = keembay_pcie_link_up,
+ .start_link = keembay_pcie_start_link,
+ .stop_link = keembay_pcie_stop_link,
+};
+
+static inline void keembay_pcie_disable_clock(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
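+/*
+ * Get a clock by name, optionally set its rate, enable it and register a
+ * devm action so the clock is disabled and unprepared again on teardown.
+ */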
+static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_add_action_or_reset(dev, keembay_pcie_disable_clock, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+
+static int keembay_pcie_probe_clocks(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+
+ pcie->clk_master = keembay_pcie_probe_clock(dev, "master", 0);
+ if (IS_ERR(pcie->clk_master))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_master),
+ "Failed to enable master clock");
+
+ pcie->clk_aux = keembay_pcie_probe_clock(dev, "aux", AUX_CLK_RATE_HZ);
+ if (IS_ERR(pcie->clk_aux))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_aux),
+ "Failed to enable auxiliary clock");
+
+ return 0;
+}
+
+/*
+ * Initialize the internal PCIe PLL in Host mode.
+ * See the following sections in Keem Bay data book,
+ * (1) 6.4.6.1 PCIe Subsystem Example Initialization,
+ * (2) 6.8 PCIe Low Jitter PLL for Ref Clk Generation.
+ */
+static int keembay_pcie_pll_init(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+ int ret;
+
+ val = FIELD_PREP(LJPLL_REF_DIV, 0) | FIELD_PREP(LJPLL_FB_DIV, 0x32);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_2);
+
+ val = FIELD_PREP(LJPLL_POST_DIV3A, 0x2) |
+ FIELD_PREP(LJPLL_POST_DIV2A, 0x2);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_3);
+
+ val = FIELD_PREP(LJPLL_EN, 0x1) | FIELD_PREP(LJPLL_FOUT_EN, 0xc);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_0);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_LJPLL_STA,
+ val, val & LJPLL_LOCK, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret)
+ dev_err(pci->dev, "Low jitter PLL is not locked\n");
+
+ return ret;
+}
+
+static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 val, mask, status;
+ struct dw_pcie_rp *pp;
+
+ /*
+ * Keem Bay PCIe Controller provides an additional IP logic on top of
+ * standard DWC IP to clear MSI IRQ by writing '1' to the respective
+ * bit of the status register.
+ *
+ * So, a chained irq handler is defined to handle this additional
+ * IP logic.
+ */
+
+ chained_irq_enter(chip, desc);
+
+ pp = &pcie->pci.pp;
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ mask = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ status = val & mask;
+
+ if (status & MSI_CTRL_INT) {
+ dw_handle_msi_irq(pp);
+ writel(status, pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int keembay_pcie_setup_msi_irq(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "pcie");
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, keembay_pcie_msi_irq_handler,
+ pcie);
+
+ return 0;
+}
+
+static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ writel(EDMA_INT_EN, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+}
+
+static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ /* Legacy interrupts are not supported in Keem Bay */
+ dev_err(pci->dev, "Legacy IRQ is not supported\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type %d\n", type);
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features keembay_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .align = SZ_16K,
+};
+
+static const struct pci_epc_features *
+keembay_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &keembay_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
+ .ep_init = keembay_pcie_ep_init,
+ .raise_irq = keembay_pcie_ep_raise_irq,
+ .get_features = keembay_pcie_get_features,
+};
+
+static const struct dw_pcie_host_ops keembay_pcie_host_ops = {
+};
+
+static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ u32 val;
+ int ret;
+
+ pp->ops = &keembay_pcie_host_ops;
+ pp->msi_irq[0] = -ENODEV;
+
+ ret = keembay_pcie_setup_msi_irq(pcie);
+ if (ret)
+ return ret;
+
+ pcie->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ ret = keembay_pcie_probe_clocks(pcie);
+ if (ret)
+ return ret;
+
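+ /* Set the PHY0 SRAM bypass bit */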
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+ val |= PHY0_SRAM_BYPASS;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+
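+ /* Configure the controller for Root Complex mode */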
+ writel(PCIE_DEVICE_TYPE, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+
+ ret = keembay_pcie_pll_init(pcie);
+ if (ret)
+ return ret;
+
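+ /* Release the controller reset (PCIE_RSTN) and deassert PERST# to the endpoint */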
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ writel(val | PCIE_RSTN, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ keembay_ep_reset_deassert(pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ keembay_ep_reset_assert(pcie);
+ dev_err(dev, "Failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ val |= MSI_CTRL_INT_EN;
+ writel(val, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static int keembay_pcie_probe(struct platform_device *pdev)
+{
+ const struct keembay_pcie_of_data *data;
+ struct device *dev = &pdev->dev;
+ struct keembay_pcie *pcie;
+ struct dw_pcie *pci;
+ enum dw_pcie_device_mode mode;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &keembay_pcie_ops;
+
+ pcie->mode = mode;
+
+ pcie->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(pcie->apb_base))
+ return PTR_ERR(pcie->apb_base);
+
+ platform_set_drvdata(pdev, pcie);
+
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_HOST))
+ return -ENODEV;
+
+ return keembay_pcie_add_pcie_port(pcie, pdev);
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_EP))
+ return -ENODEV;
+
+ pci->ep.ops = &keembay_pcie_ep_ops;
+ return dw_pcie_ep_init(&pci->ep);
+ default:
+ dev_err(dev, "Invalid device type %d\n", pcie->mode);
+ return -ENODEV;
+ }
+}
+
+static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct keembay_pcie_of_data keembay_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id keembay_pcie_of_match[] = {
+ {
+ .compatible = "intel,keembay-pcie",
+ .data = &keembay_pcie_rc_of_data,
+ },
+ {
+ .compatible = "intel,keembay-pcie-ep",
+ .data = &keembay_pcie_ep_of_data,
+ },
+ {}
+};
+
+static struct platform_driver keembay_pcie_driver = {
+ .driver = {
+ .name = "keembay-pcie",
+ .of_match_table = keembay_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = keembay_pcie_probe,
+};
+builtin_platform_driver(keembay_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
new file mode 100644
index 0000000000..2ee1467679
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Kirin Phone SoCs
+ *
+ * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
+ * https://www.huawei.com
+ *
+ * Author: Xiaowei Song <songxiaowei@huawei.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/phy/phy.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+
+#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)
+
+/* PCIe ELBI registers */
+#define SOC_PCIECTRL_CTRL0_ADDR 0x000
+#define SOC_PCIECTRL_CTRL1_ADDR 0x004
+#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
+
+/* info located in APB */
+#define PCIE_APP_LTSSM_ENABLE 0x01c
+#define PCIE_APB_PHY_STATUS0 0x400
+#define PCIE_LINKUP_ENABLE (0x8020)
+#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)
+
+/* info located in sysctrl */
+#define SCTRL_PCIE_CMOS_OFFSET 0x60
+#define SCTRL_PCIE_CMOS_BIT 0x10
+#define SCTRL_PCIE_ISO_OFFSET 0x44
+#define SCTRL_PCIE_ISO_BIT 0x30
+#define SCTRL_PCIE_HPCLK_OFFSET 0x190
+#define SCTRL_PCIE_HPCLK_BIT 0x184000
+#define SCTRL_PCIE_OE_OFFSET 0x14a
+#define PCIE_DEBOUNCE_PARAM 0xF0F400
+#define PCIE_OE_BYPASS (0x3 << 28)
+
+/*
+ * Max number of connected PCI slots at an external PCI bridge
+ *
+ * This is used on HiKey 970, which has a PEX 8606 bridge with 4 connected
+ * lanes: lane 0 is the upstream lane, one downstream lane is connected to an
+ * on-board Ethernet adapter and the other two are connected to M.2 and mini
+ * PCI slots.
+ *
+ * Each slot has a different clock source and uses a separate PERST# pin.
+ */
+#define MAX_PCI_SLOTS 3
+
+enum pcie_kirin_phy_type {
+ PCIE_KIRIN_INTERNAL_PHY,
+ PCIE_KIRIN_EXTERNAL_PHY
+};
+
+struct kirin_pcie {
+ enum pcie_kirin_phy_type type;
+
+ struct dw_pcie *pci;
+ struct regmap *apb;
+ struct phy *phy;
+ void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */
+
+ /* DWC PERST# */
+ int gpio_id_dwc_perst;
+
+ /* Per-slot PERST# */
+ int num_slots;
+ int gpio_id_reset[MAX_PCI_SLOTS];
+ const char *reset_names[MAX_PCI_SLOTS];
+
+ /* Per-slot clkreq */
+ int n_gpio_clkreq;
+ int gpio_id_clkreq[MAX_PCI_SLOTS];
+ const char *clkreq_names[MAX_PCI_SLOTS];
+};
+
+/*
+ * Kirin 960 PHY. Can't be split into a PHY driver without changing the
+ * DT schema.
+ */
+
+#define REF_CLK_FREQ 100000000
+
+/* PHY info located in APB */
+#define PCIE_APB_PHY_CTRL0 0x0
+#define PCIE_APB_PHY_CTRL1 0x4
+#define PCIE_APB_PHY_STATUS0 0x400
+#define PIPE_CLK_STABLE BIT(19)
+#define PHY_REF_PAD_BIT BIT(8)
+#define PHY_PWR_DOWN_BIT BIT(22)
+#define PHY_RST_ACK_BIT BIT(16)
+
+/* peri_crg ctrl */
+#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
+#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000
+
+/* Time for delay */
+#define REF_2_PERST_MIN 21000
+#define REF_2_PERST_MAX 25000
+#define PERST_2_ACCESS_MIN 10000
+#define PERST_2_ACCESS_MAX 12000
+#define PIPE_CLK_WAIT_MIN 550
+#define PIPE_CLK_WAIT_MAX 600
+#define TIME_CMOS_MIN 100
+#define TIME_CMOS_MAX 105
+#define TIME_PHY_PD_MIN 10
+#define TIME_PHY_PD_MAX 11
+
+struct hi3660_pcie_phy {
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *crgctrl;
+ struct regmap *sysctrl;
+ struct clk *apb_sys_clk;
+ struct clk *apb_phy_clk;
+ struct clk *phy_ref_clk;
+ struct clk *aclk;
+ struct clk *aux_clk;
+};
+
+/* Registers in PCIePHY */
+static inline void kirin_apb_phy_writel(struct hi3660_pcie_phy *hi3660_pcie_phy,
+ u32 val, u32 reg)
+{
+ writel(val, hi3660_pcie_phy->base + reg);
+}
+
+static inline u32 kirin_apb_phy_readl(struct hi3660_pcie_phy *hi3660_pcie_phy,
+ u32 reg)
+{
+ return readl(hi3660_pcie_phy->base + reg);
+}
+
+static int hi3660_pcie_phy_get_clk(struct hi3660_pcie_phy *phy)
+{
+ struct device *dev = phy->dev;
+
+ phy->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
+ if (IS_ERR(phy->phy_ref_clk))
+ return PTR_ERR(phy->phy_ref_clk);
+
+ phy->aux_clk = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(phy->aux_clk))
+ return PTR_ERR(phy->aux_clk);
+
+ phy->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
+ if (IS_ERR(phy->apb_phy_clk))
+ return PTR_ERR(phy->apb_phy_clk);
+
+ phy->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
+ if (IS_ERR(phy->apb_sys_clk))
+ return PTR_ERR(phy->apb_sys_clk);
+
+ phy->aclk = devm_clk_get(dev, "pcie_aclk");
+ if (IS_ERR(phy->aclk))
+ return PTR_ERR(phy->aclk);
+
+ return 0;
+}
+
+static int hi3660_pcie_phy_get_resource(struct hi3660_pcie_phy *phy)
+{
+ struct device *dev = phy->dev;
+ struct platform_device *pdev;
+
+ /* registers */
+ pdev = container_of(dev, struct platform_device, dev);
+
+ phy->base = devm_platform_ioremap_resource_byname(pdev, "phy");
+ if (IS_ERR(phy->base))
+ return PTR_ERR(phy->base);
+
+ phy->crgctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
+ if (IS_ERR(phy->crgctrl))
+ return PTR_ERR(phy->crgctrl);
+
+ phy->sysctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
+ if (IS_ERR(phy->sysctrl))
+ return PTR_ERR(phy->sysctrl);
+
+ return 0;
+}
+
+static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
+{
+ struct device *dev = phy->dev;
+ u32 reg_val;
+
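+ /*
+ * Bring the PHY out of power-down and reset, then verify that the
+ * PIPE clock is stable.
+ */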
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1);
+ reg_val &= ~PHY_REF_PAD_BIT;
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1);
+
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL0);
+ reg_val &= ~PHY_PWR_DOWN_BIT;
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL0);
+ usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);
+
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1);
+ reg_val &= ~PHY_RST_ACK_BIT;
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1);
+
+ usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
+ if (reg_val & PIPE_CLK_STABLE) {
+ dev_err(dev, "PIPE clk is not stable\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hi3660_pcie_phy_oe_enable(struct hi3660_pcie_phy *phy)
+{
+ u32 val;
+
+ regmap_read(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
+ val |= PCIE_DEBOUNCE_PARAM;
+ val &= ~PCIE_OE_BYPASS;
+ regmap_write(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
+}
+
+static int hi3660_pcie_phy_clk_ctrl(struct hi3660_pcie_phy *phy, bool enable)
+{
+ int ret = 0;
+
+ if (!enable)
+ goto close_clk;
+
+ ret = clk_set_rate(phy->phy_ref_clk, REF_CLK_FREQ);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy->phy_ref_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy->apb_sys_clk);
+ if (ret)
+ goto apb_sys_fail;
+
+ ret = clk_prepare_enable(phy->apb_phy_clk);
+ if (ret)
+ goto apb_phy_fail;
+
+ ret = clk_prepare_enable(phy->aclk);
+ if (ret)
+ goto aclk_fail;
+
+ ret = clk_prepare_enable(phy->aux_clk);
+ if (ret)
+ goto aux_clk_fail;
+
+ return 0;
+
+close_clk:
+ clk_disable_unprepare(phy->aux_clk);
+aux_clk_fail:
+ clk_disable_unprepare(phy->aclk);
+aclk_fail:
+ clk_disable_unprepare(phy->apb_phy_clk);
+apb_phy_fail:
+ clk_disable_unprepare(phy->apb_sys_clk);
+apb_sys_fail:
+ clk_disable_unprepare(phy->phy_ref_clk);
+
+ return ret;
+}
+
+static int hi3660_pcie_phy_power_on(struct kirin_pcie *pcie)
+{
+ struct hi3660_pcie_phy *phy = pcie->phy_priv;
+ int ret;
+
+ /* Power supply for Host */
+ regmap_write(phy->sysctrl,
+ SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
+ usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
+
+ hi3660_pcie_phy_oe_enable(phy);
+
+ ret = hi3660_pcie_phy_clk_ctrl(phy, true);
+ if (ret)
+ return ret;
+
+ /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
+ regmap_write(phy->sysctrl,
+ SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
+ regmap_write(phy->crgctrl,
+ CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
+ regmap_write(phy->sysctrl,
+ SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);
+
+ ret = hi3660_pcie_phy_start(phy);
+ if (ret)
+ goto disable_clks;
+
+ return 0;
+
+disable_clks:
+ hi3660_pcie_phy_clk_ctrl(phy, false);
+ return ret;
+}
+
+static int hi3660_pcie_phy_init(struct platform_device *pdev,
+ struct kirin_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+ struct hi3660_pcie_phy *phy;
+ int ret;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ pcie->phy_priv = phy;
+ phy->dev = dev;
+
+ ret = hi3660_pcie_phy_get_clk(phy);
+ if (ret)
+ return ret;
+
+ return hi3660_pcie_phy_get_resource(phy);
+}
+
+static int hi3660_pcie_phy_power_off(struct kirin_pcie *pcie)
+{
+ struct hi3660_pcie_phy *phy = pcie->phy_priv;
+
+ /* Drop power supply for Host */
+ regmap_write(phy->sysctrl, SCTRL_PCIE_CMOS_OFFSET, 0x00);
+
+ hi3660_pcie_phy_clk_ctrl(phy, false);
+
+ return 0;
+}
+
+/*
+ * The non-PHY part starts here
+ */
+
+static const struct regmap_config pcie_kirin_regmap_conf = {
+ .name = "kirin_pcie_apb",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ char name[32];
+ int ret, i;
+
+ /* This is an optional property */
+ ret = gpiod_count(dev, "hisilicon,clken");
+ if (ret < 0)
+ return 0;
+
+ if (ret > MAX_PCI_SLOTS) {
+ dev_err(dev, "Too many GPIO clock requests!\n");
+ return -EINVAL;
+ }
+
+ pcie->n_gpio_clkreq = ret;
+
+ for (i = 0; i < pcie->n_gpio_clkreq; i++) {
+ pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node,
+ "hisilicon,clken-gpios", i);
+ if (pcie->gpio_id_clkreq[i] < 0)
+ return pcie->gpio_id_clkreq[i];
+
+ sprintf(name, "pcie_clkreq_%d", i);
+ pcie->clkreq_names[i] = devm_kstrdup_const(dev, name,
+ GFP_KERNEL);
+ if (!pcie->clkreq_names[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
+ struct platform_device *pdev,
+ struct device_node *node)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent, *child;
+ int ret, slot, i;
+ char name[32];
+
+ for_each_available_child_of_node(node, parent) {
+ for_each_available_child_of_node(parent, child) {
+ i = pcie->num_slots;
+
+ pcie->gpio_id_reset[i] = of_get_named_gpio(child,
+ "reset-gpios", 0);
+ if (pcie->gpio_id_reset[i] < 0)
+ continue;
+
+ pcie->num_slots++;
+ if (pcie->num_slots > MAX_PCI_SLOTS) {
+ dev_err(dev, "Too many PCI slots!\n");
+ ret = -EINVAL;
+ goto put_node;
+ }
+
+ ret = of_pci_get_devfn(child);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse devfn: %d\n", ret);
+ goto put_node;
+ }
+
+ slot = PCI_SLOT(ret);
+
+ sprintf(name, "pcie_perst_%d", slot);
+ pcie->reset_names[i] = devm_kstrdup_const(dev, name,
+ GFP_KERNEL);
+ if (!pcie->reset_names[i]) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
+ }
+ }
+
+ return 0;
+
+put_node:
+ of_node_put(child);
+ of_node_put(parent);
+ return ret;
+}
+
+static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *node = dev->of_node;
+ void __iomem *apb_base;
+ int ret;
+
+ apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(apb_base))
+ return PTR_ERR(apb_base);
+
+ kirin_pcie->apb = devm_regmap_init_mmio(dev, apb_base,
+ &pcie_kirin_regmap_conf);
+ if (IS_ERR(kirin_pcie->apb))
+ return PTR_ERR(kirin_pcie->apb);
+
+ /* pcie internal PERST# gpio */
+ kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node,
+ "reset-gpios", 0);
+ if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) {
+ dev_err(dev, "unable to get a valid gpio pin\n");
+ return -ENODEV;
+ }
+
+ ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
+ if (ret)
+ return ret;
+
+ /* Parse OF children */
+ for_each_available_child_of_node(node, child) {
+ ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
+ if (ret)
+ goto put_node;
+ }
+
+ return 0;
+
+put_node:
+ of_node_put(child);
+ return ret;
+}
+
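+/*
+ * DBI accesses are routed through a sideband window: PCIE_ELBI_SLV_DBI_ENABLE
+ * must be set in the APB control registers around each DBI access and cleared
+ * again afterwards.
+ */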
+static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
+ bool on)
+{
+ u32 val;
+
+ regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, &val);
+ if (on)
+ val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+ regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, val);
+}
+
+static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
+ bool on)
+{
+ u32 val;
+
+ regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, &val);
+ if (on)
+ val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+ regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, val);
+}
+
+static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int kirin_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int kirin_pcie_add_bus(struct pci_bus *bus)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ int i, ret;
+
+ if (!kirin_pcie->num_slots)
+ return 0;
+
+ /* Send PERST# to each slot */
+ for (i = 0; i < kirin_pcie->num_slots; i++) {
+ ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1);
+ if (ret) {
+ dev_err(pci->dev, "PERST# %s error: %d\n",
+ kirin_pcie->reset_names[i], ret);
+ }
+ }
+ usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+ return 0;
+}
+
+static struct pci_ops kirin_pci_ops = {
+ .read = kirin_pcie_rd_own_conf,
+ .write = kirin_pcie_wr_own_conf,
+ .add_bus = kirin_pcie_add_bus,
+};
+
+static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ u32 ret;
+
+ kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
+ dw_pcie_read(base + reg, size, &ret);
+ kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+
+ return ret;
+}
+
+static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+
+ kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
+ dw_pcie_write(base + reg, size, val);
+ kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+}
+
+static int kirin_pcie_link_up(struct dw_pcie *pci)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ u32 val;
+
+ regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val);
+ if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
+ return 1;
+
+ return 0;
+}
+
+static int kirin_pcie_start_link(struct dw_pcie *pci)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+
+ /* assert LTSSM enable */
+ regmap_write(kirin_pcie->apb, PCIE_APP_LTSSM_ENABLE,
+ PCIE_LTSSM_ENABLE_BIT);
+
+ return 0;
+}
+
+static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ pp->bridge->ops = &kirin_pci_ops;
+
+ return 0;
+}
+
+static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie,
+ struct device *dev)
+{
+ int ret, i;
+
+ for (i = 0; i < kirin_pcie->num_slots; i++) {
+ if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) {
+ dev_err(dev, "unable to get a valid %s gpio\n",
+ kirin_pcie->reset_names[i]);
+ return -ENODEV;
+ }
+
+ ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i],
+ kirin_pcie->reset_names[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) {
+ if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) {
+ dev_err(dev, "unable to get a valid %s gpio\n",
+ kirin_pcie->clkreq_names[i]);
+ return -ENODEV;
+ }
+
+ ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i],
+ kirin_pcie->clkreq_names[i]);
+ if (ret)
+ return ret;
+
+ ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops kirin_dw_pcie_ops = {
+ .read_dbi = kirin_pcie_read_dbi,
+ .write_dbi = kirin_pcie_write_dbi,
+ .link_up = kirin_pcie_link_up,
+ .start_link = kirin_pcie_start_link,
+};
+
+static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
+ .host_init = kirin_pcie_host_init,
+};
+
+static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
+{
+ int i;
+
+ if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY)
+ return hi3660_pcie_phy_power_off(kirin_pcie);
+
+ for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
+ gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1);
+
+ phy_power_off(kirin_pcie->phy);
+ phy_exit(kirin_pcie->phy);
+
+ return 0;
+}
+
+static int kirin_pcie_power_on(struct platform_device *pdev,
+ struct kirin_pcie *kirin_pcie)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY) {
+ ret = hi3660_pcie_phy_init(pdev, kirin_pcie);
+ if (ret)
+ return ret;
+
+ ret = hi3660_pcie_phy_power_on(kirin_pcie);
+ if (ret)
+ return ret;
+ } else {
+ kirin_pcie->phy = devm_of_phy_get(dev, dev->of_node, NULL);
+ if (IS_ERR(kirin_pcie->phy))
+ return PTR_ERR(kirin_pcie->phy);
+
+ ret = kirin_pcie_gpio_request(kirin_pcie, dev);
+ if (ret)
+ return ret;
+
+ ret = phy_init(kirin_pcie->phy);
+ if (ret)
+ goto err;
+
+ ret = phy_power_on(kirin_pcie->phy);
+ if (ret)
+ goto err;
+ }
+
+ /* perst assert Endpoint */
+ usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
+
+ if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) {
+ ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1);
+ if (ret)
+ goto err;
+ }
+
+ usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+ return 0;
+err:
+ kirin_pcie_power_off(kirin_pcie);
+
+ return ret;
+}
+
+static int kirin_pcie_remove(struct platform_device *pdev)
+{
+ struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
+
+ dw_pcie_host_deinit(&kirin_pcie->pci->pp);
+
+ kirin_pcie_power_off(kirin_pcie);
+
+ return 0;
+}
+
+struct kirin_pcie_data {
+ enum pcie_kirin_phy_type phy_type;
+};
+
+static const struct kirin_pcie_data kirin_960_data = {
+ .phy_type = PCIE_KIRIN_INTERNAL_PHY,
+};
+
+static const struct kirin_pcie_data kirin_970_data = {
+ .phy_type = PCIE_KIRIN_EXTERNAL_PHY,
+};
+
+static const struct of_device_id kirin_pcie_match[] = {
+ { .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data },
+ { .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data },
+ {},
+};
+
+static int kirin_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct kirin_pcie_data *data;
+ struct kirin_pcie *kirin_pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ if (!dev->of_node) {
+ dev_err(dev, "NULL node\n");
+ return -EINVAL;
+ }
+
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+
+ kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
+ if (!kirin_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &kirin_dw_pcie_ops;
+ pci->pp.ops = &kirin_pcie_host_ops;
+ kirin_pcie->pci = pci;
+ kirin_pcie->type = data->phy_type;
+
+ ret = kirin_pcie_get_resource(kirin_pcie, pdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, kirin_pcie);
+
+ ret = kirin_pcie_power_on(pdev, kirin_pcie);
+ if (ret)
+ return ret;
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static struct platform_driver kirin_pcie_driver = {
+ .probe = kirin_pcie_probe,
+ .remove = kirin_pcie_remove,
+ .driver = {
+ .name = "kirin-pcie",
+ .of_match_table = kirin_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(kirin_pcie_driver);
+
+MODULE_DEVICE_TABLE(of, kirin_pcie_match);
+MODULE_DESCRIPTION("PCIe host controller driver for Kirin Phone SoCs");
+MODULE_AUTHOR("Xiaowei Song <songxiaowei@huawei.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
new file mode 100644
index 0000000000..9b62ee6992
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe Endpoint controller driver
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Author: Siddartha Mohanadoss <smohanad@codeaurora.org>
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/pcie.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/module.h>
+
+#include "pcie-designware.h"
+
+/* PARF registers */
+#define PARF_SYS_CTRL 0x00
+#define PARF_DB_CTRL 0x10
+#define PARF_PM_CTRL 0x20
+#define PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PARF_MHI_BASE_ADDR_LOWER 0x178
+#define PARF_MHI_BASE_ADDR_UPPER 0x17c
+#define PARF_DEBUG_INT_EN 0x190
+#define PARF_AXI_MSTR_RD_HALT_NO_WRITES 0x1a4
+#define PARF_AXI_MSTR_WR_ADDR_HALT 0x1a8
+#define PARF_Q2A_FLUSH 0x1ac
+#define PARF_LTSSM 0x1b0
+#define PARF_CFG_BITS 0x210
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
+#define PARF_SLV_ADDR_MSB_CTRL 0x2c0
+#define PARF_DBI_BASE_ADDR 0x350
+#define PARF_DBI_BASE_ADDR_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
+#define PARF_SRIS_MODE 0x644
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
+#define PARF_DEVICE_TYPE 0x1000
+#define PARF_BDF_TO_SID_CFG 0x2c00
+
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_DOWN BIT(1)
+#define PARF_INT_ALL_BME BIT(2)
+#define PARF_INT_ALL_PM_TURNOFF BIT(3)
+#define PARF_INT_ALL_DEBUG BIT(4)
+#define PARF_INT_ALL_LTR BIT(5)
+#define PARF_INT_ALL_MHI_Q6 BIT(6)
+#define PARF_INT_ALL_MHI_A7 BIT(7)
+#define PARF_INT_ALL_DSTATE_CHANGE BIT(8)
+#define PARF_INT_ALL_L1SUB_TIMEOUT BIT(9)
+#define PARF_INT_ALL_MMIO_WRITE BIT(10)
+#define PARF_INT_ALL_CFG_WRITE BIT(11)
+#define PARF_INT_ALL_BRIDGE_FLUSH_N BIT(12)
+#define PARF_INT_ALL_LINK_UP BIT(13)
+#define PARF_INT_ALL_AER_LEGACY BIT(14)
+#define PARF_INT_ALL_PLS_ERR BIT(15)
+#define PARF_INT_ALL_PME_LEGACY BIT(16)
+#define PARF_INT_ALL_PLS_PME BIT(17)
+#define PARF_INT_ALL_EDMA BIT(22)
+
+/* PARF_BDF_TO_SID_CFG register fields */
+#define PARF_BDF_TO_SID_BYPASS BIT(0)
+
+/* PARF_DEBUG_INT_EN register fields */
+#define PARF_DEBUG_INT_PM_DSTATE_CHANGE BIT(1)
+#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
+#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
+
+/* PARF_DEVICE_TYPE register fields */
+#define PARF_DEVICE_TYPE_EP 0x0
+
+/* PARF_PM_CTRL register fields */
+#define PARF_PM_CTRL_REQ_EXIT_L1 BIT(1)
+#define PARF_PM_CTRL_READY_ENTR_L23 BIT(2)
+#define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5)
+
+/* PARF_MHI_CLOCK_RESET_CTRL fields */
+#define PARF_MSTR_AXI_CLK_EN BIT(1)
+
+/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
+#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0)
+
+/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
+#define PARF_AXI_MSTR_WR_ADDR_HALT_EN BIT(31)
+
+/* PARF_Q2A_FLUSH register fields */
+#define PARF_Q2A_FLUSH_EN BIT(16)
+
+/* PARF_SYS_CTRL register fields */
+#define PARF_SYS_CTRL_AUX_PWR_DET BIT(4)
+#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6)
+#define PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS BIT(10)
+#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11)
+
+/* PARF_DB_CTRL register fields */
+#define PARF_DB_CTRL_INSR_DBNCR_BLOCK BIT(0)
+#define PARF_DB_CTRL_RMVL_DBNCR_BLOCK BIT(1)
+#define PARF_DB_CTRL_DBI_WKP_BLOCK BIT(4)
+#define PARF_DB_CTRL_SLV_WKP_BLOCK BIT(5)
+#define PARF_DB_CTRL_MST_WKP_BLOCK BIT(6)
+
+/* PARF_CFG_BITS register fields */
+#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1)
+
+/* ELBI registers */
+#define ELBI_SYS_STTS 0x08
+#define ELBI_CS2_ENABLE 0xa4
+
+/* DBI registers */
+#define DBI_CON_STATUS 0x44
+
+/* DBI register fields */
+#define DBI_CON_STATUS_POWER_STATE_MASK GENMASK(1, 0)
+
+#define XMLH_LINK_UP 0x400
+#define CORE_RESET_TIME_US_MIN 1000
+#define CORE_RESET_TIME_US_MAX 1005
+#define WAKE_DELAY_US 2000 /* 2 ms */
+
+#define PCIE_GEN1_BW_MBPS 250
+#define PCIE_GEN2_BW_MBPS 500
+#define PCIE_GEN3_BW_MBPS 985
+#define PCIE_GEN4_BW_MBPS 1969
+
+#define to_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+enum qcom_pcie_ep_link_status {
+ QCOM_PCIE_EP_LINK_DISABLED,
+ QCOM_PCIE_EP_LINK_ENABLED,
+ QCOM_PCIE_EP_LINK_UP,
+ QCOM_PCIE_EP_LINK_DOWN,
+};
+
+/**
+ * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
+ * @pci: Designware PCIe controller struct
+ * @parf: Qualcomm PCIe specific PARF register base
+ * @elbi: Designware PCIe specific ELBI register base
+ * @mmio: MMIO register base
+ * @perst_map: PERST regmap
+ * @mmio_res: MMIO region resource
+ * @core_reset: PCIe Endpoint core reset
+ * @reset: PERST# GPIO
+ * @wake: WAKE# GPIO
+ * @phy: PHY controller block
+ * @debugfs: PCIe Endpoint Debugfs directory
+ * @icc_mem: Handle to an interconnect path between PCIe and MEM
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count
+ * @perst_en: Flag for PERST enable
+ * @perst_sep_en: Flag for PERST separation enable
+ * @link_status: PCIe Link status
+ * @global_irq: Qualcomm PCIe specific Global IRQ
+ * @perst_irq: PERST# IRQ
+ */
+struct qcom_pcie_ep {
+ struct dw_pcie pci;
+
+ void __iomem *parf;
+ void __iomem *elbi;
+ void __iomem *mmio;
+ struct regmap *perst_map;
+ struct resource *mmio_res;
+
+ struct reset_control *core_reset;
+ struct gpio_desc *reset;
+ struct gpio_desc *wake;
+ struct phy *phy;
+ struct dentry *debugfs;
+
+ struct icc_path *icc_mem;
+
+ struct clk_bulk_data *clks;
+ int num_clks;
+
+ u32 perst_en;
+ u32 perst_sep_en;
+
+ enum qcom_pcie_ep_link_status link_status;
+ int global_irq;
+ int perst_irq;
+};
+
+static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_assert(pcie_ep->core_reset);
+ if (ret) {
+ dev_err(dev, "Cannot assert core reset\n");
+ return ret;
+ }
+
+ usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);
+
+ ret = reset_control_deassert(pcie_ep->core_reset);
+ if (ret) {
+ dev_err(dev, "Cannot de-assert core reset\n");
+ return ret;
+ }
+
+ usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);
+
+ return 0;
+}
+
+/*
+ * Delatch PERST_EN and PERST_SEPARATION_ENABLE with TCSR to avoid
+ * device reset during host reboot and hibernation. The driver is
+ * expected to handle this situation.
+ */
+static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
+{
+ if (pcie_ep->perst_map) {
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
+ }
+}
+
+static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ u32 reg;
+
+ reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);
+
+ return reg & XMLH_LINK_UP;
+}
+
+static int qcom_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ enable_irq(pcie_ep->perst_irq);
+
+ return 0;
+}
+
+static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ disable_irq(pcie_ep->perst_irq);
+}
+
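+/*
+ * DBI2 (shadow register) writes only take effect while the ELBI CS2 enable
+ * bit is set, so toggle it around the access.
+ */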
+static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ int ret;
+
+ writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
+
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
+
+ writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
+}
+
+static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ u32 offset, status, bw;
+ int speed, width;
+ int ret;
+
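+ /*
+ * Scale the pcie-mem interconnect bandwidth to the negotiated link
+ * speed and width read back from the Link Status register.
+ */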
+ if (!pcie_ep->icc_mem)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
+
+ switch (speed) {
+ case 1:
+ bw = MBps_to_icc(PCIE_GEN1_BW_MBPS);
+ break;
+ case 2:
+ bw = MBps_to_icc(PCIE_GEN2_BW_MBPS);
+ break;
+ case 3:
+ bw = MBps_to_icc(PCIE_GEN3_BW_MBPS);
+ break;
+ default:
+ dev_warn(pci->dev, "using default GEN4 bandwidth\n");
+ fallthrough;
+ case 4:
+ bw = MBps_to_icc(PCIE_GEN4_BW_MBPS);
+ break;
+ }
+
+ ret = icc_set_bw(pcie_ep->icc_mem, 0, width * bw);
+ if (ret)
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+}
+
+static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks);
+ if (ret)
+ return ret;
+
+ ret = qcom_pcie_ep_core_reset(pcie_ep);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_init(pcie_ep->phy);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_set_mode_ext(pcie_ep->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_EP);
+ if (ret)
+ goto err_phy_exit;
+
+ ret = phy_power_on(pcie_ep->phy);
+ if (ret)
+ goto err_phy_exit;
+
+ /*
+ * Some Qualcomm platforms require interconnect bandwidth constraints
+ * to be set before enabling interconnect clocks.
+ *
+ * Set an initial peak bandwidth corresponding to single-lane Gen 1
+ * for the pcie-mem path.
+ */
+ ret = icc_set_bw(pcie_ep->icc_mem, 0, MBps_to_icc(PCIE_GEN1_BW_MBPS));
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ goto err_phy_off;
+ }
+
+ return 0;
+
+err_phy_off:
+ phy_power_off(pcie_ep->phy);
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
+
+ return ret;
+}
+
+static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ icc_set_bw(pcie_ep->icc_mem, 0, 0);
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
+}
+
+static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+ u32 val, offset;
+ int ret;
+
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ return ret;
+ }
+
+ /* Assert WAKE# to RC to indicate device is ready */
+ gpiod_set_value_cansleep(pcie_ep->wake, 1);
+ usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
+ gpiod_set_value_cansleep(pcie_ep->wake, 0);
+
+ qcom_pcie_ep_configure_tcsr(pcie_ep);
+
+ /* Disable BDF to SID mapping */
+ val = readl_relaxed(pcie_ep->parf + PARF_BDF_TO_SID_CFG);
+ val |= PARF_BDF_TO_SID_BYPASS;
+ writel_relaxed(val, pcie_ep->parf + PARF_BDF_TO_SID_CFG);
+
+ /* Enable debug IRQ */
+ val = readl_relaxed(pcie_ep->parf + PARF_DEBUG_INT_EN);
+ val |= PARF_DEBUG_INT_RADM_PM_TURNOFF |
+ PARF_DEBUG_INT_CFG_BUS_MASTER_EN |
+ PARF_DEBUG_INT_PM_DSTATE_CHANGE;
+ writel_relaxed(val, pcie_ep->parf + PARF_DEBUG_INT_EN);
+
+ /* Configure PCIe to endpoint mode */
+ writel_relaxed(PARF_DEVICE_TYPE_EP, pcie_ep->parf + PARF_DEVICE_TYPE);
+
+ /* Allow entering L1 state */
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val &= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+
+ /* Read halts write */
+ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+ val &= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+
+ /* Write after write halt */
+ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+ val |= PARF_AXI_MSTR_WR_ADDR_HALT_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ /* Q2A flush disable */
+ val = readl_relaxed(pcie_ep->parf + PARF_Q2A_FLUSH);
+ val &= ~PARF_Q2A_FLUSH_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);
+
+ /*
+ * Disable Master AXI clock during idle. Do not allow DBI access
+ * to take the core out of L1. Disable core clock gating that
+ * gates PIPE clock from propagating to core clock. Report to the
+ * host that Vaux is present.
+ */
+ val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
+ val &= ~PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS;
+ val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE |
+ PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
+ PARF_SYS_CTRL_AUX_PWR_DET;
+ writel_relaxed(val, pcie_ep->parf + PARF_SYS_CTRL);
+
+ /* Disable the debouncers */
+ val = readl_relaxed(pcie_ep->parf + PARF_DB_CTRL);
+ val |= PARF_DB_CTRL_INSR_DBNCR_BLOCK | PARF_DB_CTRL_RMVL_DBNCR_BLOCK |
+ PARF_DB_CTRL_DBI_WKP_BLOCK | PARF_DB_CTRL_SLV_WKP_BLOCK |
+ PARF_DB_CTRL_MST_WKP_BLOCK;
+ writel_relaxed(val, pcie_ep->parf + PARF_DB_CTRL);
+
+ /* Request to exit from L1SS for MSI and LTR MSG */
+ val = readl_relaxed(pcie_ep->parf + PARF_CFG_BITS);
+ val |= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_CFG_BITS);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /* Set the L0s Exit Latency to 2us-4us = 0x6 */
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_L0SEL;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL, 0x6);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);
+
+	/* Set the L1 Exit Latency to 32us-64us = 0x6 */
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_L1EL;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_L1EL, 0x6);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
+ val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
+ PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
+ PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
+
+ ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto err_disable_resources;
+ }
+
+ /*
+	 * The physical address of the MMIO region, which is exposed as the BAR,
+	 * should be written to the MHI BASE registers.
+ */
+ writel_relaxed(pcie_ep->mmio_res->start,
+ pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
+ writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);
+
+ /* Gate Master AXI clock to MHI bus during L1SS */
+ val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val &= ~PARF_MSTR_AXI_CLK_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+
+ dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+
+ /* Enable LTSSM */
+ val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
+ val |= BIT(8);
+ writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
+
+ return 0;
+
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
+
+ return ret;
+}
+
+static void qcom_pcie_perst_assert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+
+ if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
+ dev_dbg(dev, "Link is already disabled\n");
+ return;
+ }
+
+ qcom_pcie_disable_resources(pcie_ep);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
+}
+
+/* Common DWC controller ops */
+static const struct dw_pcie_ops pci_ops = {
+ .link_up = qcom_pcie_dw_link_up,
+ .start_link = qcom_pcie_dw_start_link,
+ .stop_link = qcom_pcie_dw_stop_link,
+ .write_dbi2 = qcom_pcie_dw_write_dbi2,
+};
+
+static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device_node *syscon;
+ struct resource *res;
+ int ret;
+
+ pcie_ep->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
+ if (IS_ERR(pcie_ep->parf))
+ return PTR_ERR(pcie_ep->parf);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ pci->dbi_base2 = pci->dbi_base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie_ep->elbi))
+ return PTR_ERR(pcie_ep->elbi);
+
+ pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mmio");
+ if (!pcie_ep->mmio_res) {
+ dev_err(dev, "Failed to get mmio resource\n");
+ return -EINVAL;
+ }
+
+ pcie_ep->mmio = devm_pci_remap_cfg_resource(dev, pcie_ep->mmio_res);
+ if (IS_ERR(pcie_ep->mmio))
+ return PTR_ERR(pcie_ep->mmio);
+
+ syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
+ if (!syscon) {
+ dev_dbg(dev, "PERST separation not available\n");
+ return 0;
+ }
+
+ pcie_ep->perst_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(pcie_ep->perst_map))
+ return PTR_ERR(pcie_ep->perst_map);
+
+ ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
+ 1, &pcie_ep->perst_en);
+ if (ret < 0) {
+ dev_err(dev, "No Perst Enable offset in syscon\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
+ 2, &pcie_ep->perst_sep_en);
+ if (ret < 0) {
+ dev_err(dev, "No Perst Separation Enable offset in syscon\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to get io resources %d\n", ret);
+ return ret;
+ }
+
+ pcie_ep->num_clks = devm_clk_bulk_get_all(dev, &pcie_ep->clks);
+ if (pcie_ep->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return pcie_ep->num_clks;
+ }
+
+ pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
+ if (IS_ERR(pcie_ep->core_reset))
+ return PTR_ERR(pcie_ep->core_reset);
+
+ pcie_ep->reset = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie_ep->reset))
+ return PTR_ERR(pcie_ep->reset);
+
+ pcie_ep->wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie_ep->wake))
+ return PTR_ERR(pcie_ep->wake);
+
+ pcie_ep->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie_ep->phy))
+ ret = PTR_ERR(pcie_ep->phy);
+
+ pcie_ep->icc_mem = devm_of_icc_get(dev, "pcie-mem");
+ if (IS_ERR(pcie_ep->icc_mem))
+ ret = PTR_ERR(pcie_ep->icc_mem);
+
+ return ret;
+}
+
+/* TODO: Notify clients about PCIe state change */
+static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
+ u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
+ u32 dstate, val;
+
+ writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
+ status &= mask;
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
+ dev_dbg(dev, "Received Linkdown event\n");
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
+ pci_epc_linkdown(pci->ep.epc);
+ } else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
+ dev_dbg(dev, "Received BME event. Link is enabled!\n");
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
+ qcom_pcie_ep_icc_update(pcie_ep);
+ pci_epc_bme_notify(pci->ep.epc);
+ } else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
+ dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val |= PARF_PM_CTRL_READY_ENTR_L23;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+ } else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE, status)) {
+ dstate = dw_pcie_readl_dbi(pci, DBI_CON_STATUS) &
+ DBI_CON_STATUS_POWER_STATE_MASK;
+ dev_dbg(dev, "Received D%d state event\n", dstate);
+ if (dstate == 3) {
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val |= PARF_PM_CTRL_REQ_EXIT_L1;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+ }
+ } else if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ dev_dbg(dev, "Received Linkup event. Enumeration complete!\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
+ } else {
+ dev_err(dev, "Received unknown event: %d\n", status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ u32 perst;
+
+ perst = gpiod_get_value(pcie_ep->reset);
+ if (perst) {
+ dev_dbg(dev, "PERST asserted by host. Shutting down the PCIe link!\n");
+ qcom_pcie_perst_assert(pci);
+ } else {
+ dev_dbg(dev, "PERST de-asserted by host. Starting link training!\n");
+ qcom_pcie_perst_deassert(pci);
+ }
+
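+	/* Update the IRQ trigger type based on the current PERST state */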
+ irq_set_irq_type(gpiod_to_irq(pcie_ep->reset),
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ int ret;
+
+ pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
+ if (pcie_ep->global_irq < 0)
+ return pcie_ep->global_irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
+ qcom_pcie_ep_global_irq_thread,
+ IRQF_ONESHOT,
+ "global_irq", pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request Global IRQ\n");
+ return ret;
+ }
+
+ pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
+ irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
+ qcom_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "perst_irq", pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
+ disable_irq(pcie_ep->global_irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static int qcom_pcie_ep_link_transition_count(struct seq_file *s, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = (struct qcom_pcie_ep *)
+ dev_get_drvdata(s->private);
+
+ seq_printf(s, "L0s transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
+
+ seq_printf(s, "L1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
+
+ seq_printf(s, "L1.1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
+
+ seq_printf(s, "L1.2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
+
+ seq_printf(s, "L2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
+
+ return 0;
+}
+
+static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+
+ debugfs_create_devm_seqfile(pci->dev, "link_transition_count", pcie_ep->debugfs,
+ qcom_pcie_ep_link_transition_count);
+}
+
+static const struct pci_epc_features qcom_pcie_epc_features = {
+ .linkup_notifier = true,
+ .core_init_notifier = true,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = SZ_4K,
+};
+
+static const struct pci_epc_features *
+qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep)
+{
+ return &qcom_pcie_epc_features;
+}
+
+static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static const struct dw_pcie_ep_ops pci_ep_ops = {
+ .ep_init = qcom_pcie_ep_init,
+ .raise_irq = qcom_pcie_ep_raise_irq,
+ .get_features = qcom_pcie_epc_get_features,
+};
+
+static int qcom_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_pcie_ep *pcie_ep;
+ char *name;
+ int ret;
+
+ pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
+ if (!pcie_ep)
+ return -ENOMEM;
+
+ pcie_ep->pci.dev = dev;
+ pcie_ep->pci.ops = &pci_ops;
+ pcie_ep->pci.ep.ops = &pci_ep_ops;
+ pcie_ep->pci.edma.nr_irqs = 1;
+ platform_set_drvdata(pdev, pcie_ep);
+
+ ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
+ if (ret)
+ return ret;
+
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
+ goto err_disable_resources;
+ }
+
+ ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
+ if (ret)
+ goto err_disable_resources;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_disable_irqs;
+ }
+
+ pcie_ep->debugfs = debugfs_create_dir(name, NULL);
+ qcom_pcie_ep_init_debugfs(pcie_ep);
+
+ return 0;
+
+err_disable_irqs:
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
+
+ return ret;
+}
+
+static void qcom_pcie_ep_remove(struct platform_device *pdev)
+{
+ struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);
+
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
+ debugfs_remove_recursive(pcie_ep->debugfs);
+
+ if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
+ return;
+
+ qcom_pcie_disable_resources(pcie_ep);
+}
+
+static const struct of_device_id qcom_pcie_ep_match[] = {
+ { .compatible = "qcom,sdx55-pcie-ep", },
+ { .compatible = "qcom,sm8450-pcie-ep", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
+
+static struct platform_driver qcom_pcie_ep_driver = {
+ .probe = qcom_pcie_ep_probe,
+ .remove_new = qcom_pcie_ep_remove,
+ .driver = {
+ .name = "qcom-pcie-ep",
+ .of_match_table = qcom_pcie_ep_match,
+ },
+};
+builtin_platform_driver(qcom_pcie_ep_driver);
+
+MODULE_AUTHOR("Siddartha Mohanadoss <smohanad@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
new file mode 100644
index 0000000000..64420ecc24
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -0,0 +1,1655 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe root complex driver
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright 2015 Linaro Limited.
+ *
+ * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/crc8.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/phy/pcie.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* PARF registers */
+#define PARF_SYS_CTRL 0x00
+#define PARF_PM_CTRL 0x20
+#define PARF_PCS_DEEMPH 0x34
+#define PARF_PCS_SWING 0x38
+#define PARF_PHY_CTRL 0x40
+#define PARF_PHY_REFCLK 0x4c
+#define PARF_CONFIG_BITS 0x50
+#define PARF_DBI_BASE_ADDR 0x168
+#define PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
+#define PARF_Q2A_FLUSH 0x1ac
+#define PARF_LTSSM 0x1b0
+#define PARF_SID_OFFSET 0x234
+#define PARF_BDF_TRANSLATE_CFG 0x24c
+#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_DEVICE_TYPE 0x1000
+#define PARF_BDF_TO_SID_TABLE_N 0x2000
+
+/* ELBI registers */
+#define ELBI_SYS_CTRL 0x04
+
+/* DBI registers */
+#define AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define AXI_MSTR_RESP_COMP_CTRL1 0x81c
+
+/* MHI registers */
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
+
+/* PARF_SYS_CTRL register fields */
+#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29)
+#define MST_WAKEUP_EN BIT(13)
+#define SLV_WAKEUP_EN BIT(12)
+#define MSTR_ACLK_CGC_DIS BIT(10)
+#define SLV_ACLK_CGC_DIS BIT(9)
+#define CORE_CLK_CGC_DIS BIT(6)
+#define AUX_PWR_DET BIT(4)
+#define L23_CLK_RMV_DIS BIT(2)
+#define L1_CLK_RMV_DIS BIT(1)
+
+/* PARF_PM_CTRL register fields */
+#define REQ_NOT_ENTR_L1 BIT(5)
+
+/* PARF_PCS_DEEMPH register fields */
+#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) FIELD_PREP(GENMASK(21, 16), x)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) FIELD_PREP(GENMASK(13, 8), x)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) FIELD_PREP(GENMASK(5, 0), x)
+
+/* PARF_PCS_SWING register fields */
+#define PCS_SWING_TX_SWING_FULL(x) FIELD_PREP(GENMASK(14, 8), x)
+#define PCS_SWING_TX_SWING_LOW(x) FIELD_PREP(GENMASK(6, 0), x)
+
+/* PARF_PHY_CTRL register fields */
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
+#define PHY_TEST_PWR_DOWN BIT(0)
+
+/* PARF_PHY_REFCLK register fields */
+#define PHY_REFCLK_SSP_EN BIT(16)
+#define PHY_REFCLK_USE_PAD BIT(12)
+
+/* PARF_CONFIG_BITS register fields */
+#define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
+
+/* PARF_SLV_ADDR_SPACE_SIZE register value */
+#define SLV_ADDR_SPACE_SZ 0x10000000
+
+/* PARF_MHI_CLOCK_RESET_CTRL register fields */
+#define AHB_CLK_EN BIT(0)
+#define MSTR_AXI_CLK_EN BIT(1)
+#define BYPASS BIT(4)
+
+/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
+#define EN BIT(31)
+
+/* PARF_LTSSM register fields */
+#define LTSSM_EN BIT(8)
+
+/* PARF_DEVICE_TYPE register fields */
+#define DEVICE_TYPE_RC 0x4
+
+/* ELBI_SYS_CTRL register fields */
+#define ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
+
+/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
+#define CFG_BRIDGE_SB_INIT BIT(0)
+
+/* PCI_EXP_SLTCAP register fields */
+#define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
+#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
+#define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \
+ PCI_EXP_SLTCAP_PCP | \
+ PCI_EXP_SLTCAP_MRLSP | \
+ PCI_EXP_SLTCAP_AIP | \
+ PCI_EXP_SLTCAP_PIP | \
+ PCI_EXP_SLTCAP_HPS | \
+ PCI_EXP_SLTCAP_EIP | \
+ PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
+ PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
+
+#define PERST_DELAY_US 1000
+
+#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
+
+#define QCOM_PCIE_1_0_0_MAX_CLOCKS 4
+struct qcom_pcie_resources_1_0_0 {
+ struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
+ struct reset_control *core;
+ struct regulator *vdda;
+};
+
+#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
+#define QCOM_PCIE_2_1_0_MAX_RESETS 6
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
+struct qcom_pcie_resources_2_1_0 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
+ struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
+ int num_resets;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+};
+
+#define QCOM_PCIE_2_3_2_MAX_CLOCKS 4
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
+struct qcom_pcie_resources_2_3_2 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
+};
+
+#define QCOM_PCIE_2_3_3_MAX_CLOCKS 5
+#define QCOM_PCIE_2_3_3_MAX_RESETS 7
+struct qcom_pcie_resources_2_3_3 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
+ struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
+};
+
+#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
+#define QCOM_PCIE_2_4_0_MAX_RESETS 12
+struct qcom_pcie_resources_2_4_0 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+ int num_clks;
+ struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
+ int num_resets;
+};
+
+#define QCOM_PCIE_2_7_0_MAX_CLOCKS 15
+#define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
+struct qcom_pcie_resources_2_7_0 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
+ int num_clks;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
+ struct reset_control *rst;
+};
+
+#define QCOM_PCIE_2_9_0_MAX_CLOCKS 5
+struct qcom_pcie_resources_2_9_0 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
+ struct reset_control *rst;
+};
+
+union qcom_pcie_resources {
+ struct qcom_pcie_resources_1_0_0 v1_0_0;
+ struct qcom_pcie_resources_2_1_0 v2_1_0;
+ struct qcom_pcie_resources_2_3_2 v2_3_2;
+ struct qcom_pcie_resources_2_3_3 v2_3_3;
+ struct qcom_pcie_resources_2_4_0 v2_4_0;
+ struct qcom_pcie_resources_2_7_0 v2_7_0;
+ struct qcom_pcie_resources_2_9_0 v2_9_0;
+};
+
+struct qcom_pcie;
+
+struct qcom_pcie_ops {
+ int (*get_resources)(struct qcom_pcie *pcie);
+ int (*init)(struct qcom_pcie *pcie);
+ int (*post_init)(struct qcom_pcie *pcie);
+ void (*deinit)(struct qcom_pcie *pcie);
+ void (*ltssm_enable)(struct qcom_pcie *pcie);
+ int (*config_sid)(struct qcom_pcie *pcie);
+};
+
+struct qcom_pcie_cfg {
+ const struct qcom_pcie_ops *ops;
+};
+
+struct qcom_pcie {
+ struct dw_pcie *pci;
+ void __iomem *parf; /* DT parf */
+ void __iomem *elbi; /* DT elbi */
+ void __iomem *mhi;
+ union qcom_pcie_resources res;
+ struct phy *phy;
+ struct gpio_desc *reset;
+ struct icc_path *icc_mem;
+ const struct qcom_pcie_cfg *cfg;
+ struct dentry *debugfs;
+ bool suspended;
+};
+
+#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
+
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+{
+ /* Ensure that PERST has been asserted for at least 100 ms */
+ msleep(100);
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static int qcom_pcie_start_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ /* Enable Link Training state machine */
+ if (pcie->cfg->ops->ltssm_enable)
+ pcie->cfg->ops->ltssm_enable(pcie);
+
+ return 0;
+}
+
+static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
+{
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
+ val &= ~PCI_EXP_SLTCAP_HPC;
+ writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->elbi + ELBI_SYS_CTRL);
+ val |= ELBI_SYS_CTRL_LT_ENABLE;
+ writel(val, pcie->elbi + ELBI_SYS_CTRL);
+}
+
+static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vdda_phy";
+ res->supplies[2].supply = "vdda_refclk";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->clks[0].id = "iface";
+ res->clks[1].id = "core";
+ res->clks[2].id = "phy";
+ res->clks[3].id = "aux";
+ res->clks[4].id = "ref";
+
+ /* iface, core, phy are required */
+ ret = devm_clk_bulk_get(dev, 3, res->clks);
+ if (ret < 0)
+ return ret;
+
+ /* aux, ref are optional */
+ ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
+ if (ret < 0)
+ return ret;
+
+ res->resets[0].id = "pci";
+ res->resets[1].id = "axi";
+ res->resets[2].id = "ahb";
+ res->resets[3].id = "por";
+ res->resets[4].id = "phy";
+ res->resets[5].id = "ext";
+
+ /* ext is optional on APQ8016 */
+ res->num_resets = is_apq ? 5 : 6;
+ ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ reset_control_bulk_assert(res->num_resets, res->resets);
+
+ writel(1, pcie->parf + PARF_PHY_CTRL);
+
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+	/* Reset the PCIe interface as U-Boot can leave it in an undefined state */
+ ret = reset_control_bulk_assert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = reset_control_bulk_deassert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ u32 val;
+ int ret;
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
+ of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
+ writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+ pcie->parf + PARF_PCS_DEEMPH);
+ writel(PCS_SWING_TX_SWING_FULL(120) |
+ PCS_SWING_TX_SWING_LOW(120),
+ pcie->parf + PARF_PCS_SWING);
+ writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
+ }
+
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+ /* set TX termination offset */
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+ val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+ }
+
+ /* enable external reference clock */
+ val = readl(pcie->parf + PARF_PHY_REFCLK);
+ /* USE_PAD is required only for ipq806x */
+ if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
+ val &= ~PHY_REFCLK_USE_PAD;
+ val |= PHY_REFCLK_SSP_EN;
+ writel(val, pcie->parf + PARF_PHY_REFCLK);
+
+ /* wait for clock acquisition */
+ usleep_range(1000, 1500);
+
+	/* Set the max TLP size to 2K, instead of using the default of 4K */
+ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+ pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
+ writel(CFG_BRIDGE_SB_INIT,
+ pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);
+
+ qcom_pcie_clear_hpc(pcie->pci);
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->vdda = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(res->vdda))
+ return PTR_ERR(res->vdda);
+
+ res->clks[0].id = "iface";
+ res->clks[1].id = "aux";
+ res->clks[2].id = "master_bus";
+ res->clks[3].id = "slave_bus";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->core = devm_reset_control_get_exclusive(dev, "core");
+ return PTR_ERR_OR_ZERO(res->core);
+}
+
+static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+
+ reset_control_assert(res->core);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ regulator_disable(res->vdda);
+}
+
+static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_deassert(res->core);
+ if (ret) {
+ dev_err(dev, "cannot deassert core reset\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ goto err_assert_reset;
+ }
+
+ ret = regulator_enable(res->vdda);
+ if (ret) {
+ dev_err(dev, "cannot enable vdda regulator\n");
+ goto err_disable_clks;
+ }
+
+ return 0;
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+err_assert_reset:
+ reset_control_assert(res->core);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
+{
+ /* change DBI base address */
+ writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ qcom_pcie_clear_hpc(pcie->pci);
+
+ return 0;
+}
+
+static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->parf + PARF_LTSSM);
+ val |= LTSSM_EN;
+ writel(val, pcie->parf + PARF_LTSSM);
+}
+
+static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->clks[0].id = "aux";
+ res->clks[1].id = "cfg";
+ res->clks[2].id = "bus_master";
+ res->clks[3].id = "bus_slave";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PARF_SYS_CTRL);
+ val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
+ writel(val, pcie->parf + PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BYPASS;
+ writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ qcom_pcie_clear_hpc(pcie->pci);
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
+ int ret;
+
+ res->clks[0].id = "aux";
+ res->clks[1].id = "master_bus";
+ res->clks[2].id = "slave_bus";
+ res->clks[3].id = "iface";
+
+ /* qcom,pcie-ipq4019 is defined without "iface" */
+ res->num_clks = is_ipq ? 3 : 4;
+
+ ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->resets[0].id = "axi_m";
+ res->resets[1].id = "axi_s";
+ res->resets[2].id = "axi_m_sticky";
+ res->resets[3].id = "pipe_sticky";
+ res->resets[4].id = "pwr";
+ res->resets[5].id = "ahb";
+ res->resets[6].id = "pipe";
+ res->resets[7].id = "axi_m_vmid";
+ res->resets[8].id = "axi_s_xpu";
+ res->resets[9].id = "parf";
+ res->resets[10].id = "phy";
+ res->resets[11].id = "phy_ahb";
+
+ res->num_resets = is_ipq ? 12 : 6;
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+
+ reset_control_bulk_assert(res->num_resets, res->resets);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+}
+
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_bulk_assert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_bulk_deassert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+ if (ret) {
+ reset_control_bulk_assert(res->num_resets, res->resets);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->clks[0].id = "iface";
+ res->clks[1].id = "axi_m";
+ res->clks[2].id = "axi_s";
+ res->clks[3].id = "ahb";
+ res->clks[4].id = "aux";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->rst[0].id = "axi_m";
+ res->rst[1].id = "axi_s";
+ res->rst[2].id = "pipe";
+ res->rst[3].id = "axi_m_sticky";
+ res->rst[4].id = "sticky";
+ res->rst[5].id = "ahb";
+ res->rst[6].id = "sleep";
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
+ return ret;
+ }
+
+ usleep_range(2000, 2500);
+
+ ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ return ret;
+ }
+
+ /*
+ * Don't have a way to see if the reset has completed.
+ * Wait for some time.
+ */
+ usleep_range(2000, 2500);
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ goto err_assert_resets;
+ }
+
+ return 0;
+
+err_assert_resets:
+ /*
+	 * Not checking for failure; the original error will be
+	 * returned in 'ret' anyway.
+ */
+ reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+ | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PARF_SYS_CTRL);
+ writel(0, pcie->parf + PARF_Q2A_FLUSH);
+
+ writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPMS;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ unsigned int num_clks, num_opt_clks;
+ unsigned int idx;
+ int ret;
+
+ res->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(res->rst))
+ return PTR_ERR(res->rst);
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ idx = 0;
+ res->clks[idx++].id = "aux";
+ res->clks[idx++].id = "cfg";
+ res->clks[idx++].id = "bus_master";
+ res->clks[idx++].id = "bus_slave";
+ res->clks[idx++].id = "slave_q2a";
+
+ num_clks = idx;
+
+ ret = devm_clk_bulk_get(dev, num_clks, res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->clks[idx++].id = "tbu";
+ res->clks[idx++].id = "ddrss_sf_tbu";
+ res->clks[idx++].id = "aggre0";
+ res->clks[idx++].id = "aggre1";
+ res->clks[idx++].id = "noc_aggr";
+ res->clks[idx++].id = "noc_aggr_4";
+ res->clks[idx++].id = "noc_aggr_south_sf";
+ res->clks[idx++].id = "cnoc_qx";
+ res->clks[idx++].id = "sleep";
+ res->clks[idx++].id = "cnoc_sf_axi";
+
+ num_opt_clks = idx - num_clks;
+ res->num_clks = idx;
+
+ ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+ if (ret < 0)
+ goto err_disable_regulators;
+
+ ret = reset_control_assert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset assert failed (%d)\n", ret);
+ goto err_disable_clocks;
+ }
+
+ usleep_range(1000, 1500);
+
+ ret = reset_control_deassert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset deassert failed (%d)\n", ret);
+ goto err_disable_clocks;
+ }
+
+ /* Wait for reset to complete, required on SM8450 */
+ usleep_range(1000, 1500);
+
+ /* configure PCIe to RC mode */
+ writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PARF_SYS_CTRL);
+ val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
+ writel(val, pcie->parf + PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BYPASS;
+ writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+
+ /* Enable L1 and L1SS */
+ val = readl(pcie->parf + PARF_PM_CTRL);
+ val &= ~REQ_NOT_ENTR_L1;
+ writel(val, pcie->parf + PARF_PM_CTRL);
+
+ val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+err_disable_clocks:
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+err_disable_regulators:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+ qcom_pcie_clear_hpc(pcie->pci);
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
+{
+ /* iommu map structure */
+ struct {
+ u32 bdf;
+ u32 phandle;
+ u32 smmu_sid;
+ u32 smmu_sid_len;
+ } *map;
+ void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
+ struct device *dev = pcie->pci->dev;
+ u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+ int i, nr_map, size = 0;
+ u32 smmu_sid_base;
+
+ of_get_property(dev->of_node, "iommu-map", &size);
+ if (!size)
+ return 0;
+
+ map = kzalloc(size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
+ size / sizeof(u32));
+
+ nr_map = size / (sizeof(*map));
+
+ crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
+
+	/* Registers need to be zeroed out first */
+ memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
+
+ /* Extract the SMMU SID base from the first entry of iommu-map */
+ smmu_sid_base = map[0].smmu_sid;
+
+ /* Look for an available entry to hold the mapping */
+ for (i = 0; i < nr_map; i++) {
+ __be16 bdf_be = cpu_to_be16(map[i].bdf);
+ u32 val;
+ u8 hash;
+
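+		/* Hash the big-endian BDF to pick the starting slot in the table */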
+ hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+
+		/* If the register is already populated, look for the next available entry */
+ while (val) {
+ u8 current_hash = hash++;
+ u8 next_mask = 0xff;
+
+			/* If the NEXT field is NULL, update it with the next hash */
+ if (!(val & next_mask)) {
+ val |= (u32)hash;
+ writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
+ }
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
+ val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
+ writel(val, bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ kfree(map);
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->clks[0].id = "iface";
+ res->clks[1].id = "axi_m";
+ res->clks[2].id = "axi_s";
+ res->clks[3].id = "axi_bridge";
+ res->clks[4].id = "rchng";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(res->rst))
+ return PTR_ERR(res->rst);
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct device *dev = pcie->pci->dev;
+ int ret;
+
+ ret = reset_control_assert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset assert failed (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Delay periods before and after reset deassert are working values
+	 * from the downstream Codeaurora kernel.
+ */
+ usleep_range(2000, 2500);
+
+ ret = reset_control_deassert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset deassert failed (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(2000, 2500);
+
+ return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int i;
+
+ writel(SLV_ADDR_SPACE_SZ,
+ pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+
+ writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
+ writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
+ pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
+ GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
+ pci->dbi_base + GEN3_RELATED_OFF);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
+ SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PARF_SYS_CTRL);
+
+ writel(0, pcie->parf + PARF_Q2A_FLUSH);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPMS;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
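+	/* Clear all 256 entries of the BDF to SID translation table */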
+ for (i = 0; i < 256; i++)
+ writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));
+
+ return 0;
+}
+
+static int qcom_pcie_link_up(struct dw_pcie *pci)
+{
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
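+	/* The link is up when the Data Link Layer Link Active bit is set */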
+ return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ int ret;
+
+ qcom_ep_reset_assert(pcie);
+
+ ret = pcie->cfg->ops->init(pcie);
+ if (ret)
+ return ret;
+
+ ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret)
+ goto err_deinit;
+
+ ret = phy_power_on(pcie->phy);
+ if (ret)
+ goto err_deinit;
+
+ if (pcie->cfg->ops->post_init) {
+ ret = pcie->cfg->ops->post_init(pcie);
+ if (ret)
+ goto err_disable_phy;
+ }
+
+ qcom_ep_reset_deassert(pcie);
+
+ if (pcie->cfg->ops->config_sid) {
+ ret = pcie->cfg->ops->config_sid(pcie);
+ if (ret)
+ goto err_assert_reset;
+ }
+
+ return 0;
+
+err_assert_reset:
+ qcom_ep_reset_assert(pcie);
+err_disable_phy:
+ phy_power_off(pcie->phy);
+err_deinit:
+ pcie->cfg->ops->deinit(pcie);
+
+ return ret;
+}
+
+static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ qcom_ep_reset_assert(pcie);
+ phy_power_off(pcie->phy);
+ pcie->cfg->ops->deinit(pcie);
+}
+
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+ .host_init = qcom_pcie_host_init,
+ .host_deinit = qcom_pcie_host_deinit,
+};
+
+/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
+static const struct qcom_pcie_ops ops_2_1_0 = {
+ .get_resources = qcom_pcie_get_resources_2_1_0,
+ .init = qcom_pcie_init_2_1_0,
+ .post_init = qcom_pcie_post_init_2_1_0,
+ .deinit = qcom_pcie_deinit_2_1_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */
+static const struct qcom_pcie_ops ops_1_0_0 = {
+ .get_resources = qcom_pcie_get_resources_1_0_0,
+ .init = qcom_pcie_init_1_0_0,
+ .post_init = qcom_pcie_post_init_1_0_0,
+ .deinit = qcom_pcie_deinit_1_0_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */
+static const struct qcom_pcie_ops ops_2_3_2 = {
+ .get_resources = qcom_pcie_get_resources_2_3_2,
+ .init = qcom_pcie_init_2_3_2,
+ .post_init = qcom_pcie_post_init_2_3_2,
+ .deinit = qcom_pcie_deinit_2_3_2,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */
+static const struct qcom_pcie_ops ops_2_4_0 = {
+ .get_resources = qcom_pcie_get_resources_2_4_0,
+ .init = qcom_pcie_init_2_4_0,
+ .post_init = qcom_pcie_post_init_2_3_2,
+ .deinit = qcom_pcie_deinit_2_4_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_3_3 = {
+ .get_resources = qcom_pcie_get_resources_2_3_3,
+ .init = qcom_pcie_init_2_3_3,
+ .post_init = qcom_pcie_post_init_2_3_3,
+ .deinit = qcom_pcie_deinit_2_3_3,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_7_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.9.0 */
+static const struct qcom_pcie_ops ops_1_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+ .config_sid = qcom_pcie_config_sid_1_9_0,
+};
+
+/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
+static const struct qcom_pcie_ops ops_2_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_9_0,
+ .init = qcom_pcie_init_2_9_0,
+ .post_init = qcom_pcie_post_init_2_9_0,
+ .deinit = qcom_pcie_deinit_2_9_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+static const struct qcom_pcie_cfg cfg_1_0_0 = {
+ .ops = &ops_1_0_0,
+};
+
+static const struct qcom_pcie_cfg cfg_1_9_0 = {
+ .ops = &ops_1_9_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_1_0 = {
+ .ops = &ops_2_1_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_3_2 = {
+ .ops = &ops_2_3_2,
+};
+
+static const struct qcom_pcie_cfg cfg_2_3_3 = {
+ .ops = &ops_2_3_3,
+};
+
+static const struct qcom_pcie_cfg cfg_2_4_0 = {
+ .ops = &ops_2_4_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_7_0 = {
+ .ops = &ops_2_7_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_9_0 = {
+ .ops = &ops_2_9_0,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = qcom_pcie_link_up,
+ .start_link = qcom_pcie_start_link,
+};
+
+static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ int ret;
+
+ pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
+ if (IS_ERR(pcie->icc_mem))
+ return PTR_ERR(pcie->icc_mem);
+
+ /*
+ * Some Qualcomm platforms require interconnect bandwidth constraints
+ * to be set before enabling interconnect clocks.
+ *
+ * Set an initial peak bandwidth corresponding to single-lane Gen 1
+ * for the pcie-mem path.
+ */
+ ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250));
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u32 offset, status, bw;
+ int speed, width;
+ int ret;
+
+ if (!pcie->icc_mem)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
+ /* Only update constraints if link is up. */
+ if (!(status & PCI_EXP_LNKSTA_DLLLA))
+ return;
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
+
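+	/* Per-lane bandwidth: Gen 1 = 250 MB/s, Gen 2 = 500 MB/s, Gen 3 = 985 MB/s */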
+ switch (speed) {
+ case 1:
+ bw = MBps_to_icc(250);
+ break;
+ case 2:
+ bw = MBps_to_icc(500);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ fallthrough;
+ case 3:
+ bw = MBps_to_icc(985);
+ break;
+ }
+
+ ret = icc_set_bw(pcie->icc_mem, 0, width * bw);
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ }
+}
+
+static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
+{
+ struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);
+
+ seq_printf(s, "L0s transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
+
+ seq_printf(s, "L1 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
+
+ seq_printf(s, "L1.1 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
+
+ seq_printf(s, "L1.2 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
+
+ seq_printf(s, "L2 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
+
+ return 0;
+}
+
+static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name)
+ return;
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+ debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
+ qcom_pcie_link_transition_count);
+}
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+ const struct qcom_pcie_cfg *pcie_cfg;
+ struct device *dev = &pdev->dev;
+ struct qcom_pcie *pcie;
+ struct dw_pcie_rp *pp;
+ struct resource *res;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie_cfg = of_device_get_match_data(dev);
+ if (!pcie_cfg || !pcie_cfg->ops) {
+ dev_err(dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm_runtime_put;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pp = &pci->pp;
+
+ pcie->pci = pci;
+
+ pcie->cfg = pcie_cfg;
+
+ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset)) {
+ ret = PTR_ERR(pcie->reset);
+ goto err_pm_runtime_put;
+ }
+
+ pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
+ if (IS_ERR(pcie->parf)) {
+ ret = PTR_ERR(pcie->parf);
+ goto err_pm_runtime_put;
+ }
+
+ pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(pcie->elbi)) {
+ ret = PTR_ERR(pcie->elbi);
+ goto err_pm_runtime_put;
+ }
+
+ /* MHI region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
+ if (res) {
+ pcie->mhi = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->mhi)) {
+ ret = PTR_ERR(pcie->mhi);
+ goto err_pm_runtime_put;
+ }
+ }
+
+ pcie->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
+ goto err_pm_runtime_put;
+ }
+
+ ret = qcom_pcie_icc_init(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ ret = pcie->cfg->ops->get_resources(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ pp->ops = &qcom_pcie_dw_ops;
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "cannot initialize host\n");
+ goto err_phy_exit;
+ }
+
+ qcom_pcie_icc_update(pcie);
+
+ if (pcie->mhi)
+ qcom_pcie_init_debugfs(pcie);
+
+ return 0;
+
+err_phy_exit:
+ phy_exit(pcie->phy);
+err_pm_runtime_put:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int qcom_pcie_suspend_noirq(struct device *dev)
+{
+ struct qcom_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * Set minimum bandwidth required to keep data path functional during
+ * suspend.
+ */
+ ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Turn OFF the resources only for controllers without active PCIe
+ * devices. For controllers with active devices, the resources are kept
+ * ON and the link is expected to be in L0/L1 (sub)states.
+ *
+	 * Turning OFF the resources for controllers with active PCIe devices
+	 * will trigger an access violation at the end of the suspend cycle,
+	 * as the kernel tries to access the config space of PCIe devices to
+	 * mask MSIs.
+	 *
+	 * Also, it is not desirable to put the link into the L2/L3 state, as
+	 * that implies the VDD supply will be removed and the devices may go
+	 * into a powerdown state. This would affect the lifetime of storage
+	 * devices such as NVMe.
+ */
+ if (!dw_pcie_link_up(pcie->pci)) {
+ qcom_pcie_host_deinit(&pcie->pci->pp);
+ pcie->suspended = true;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_resume_noirq(struct device *dev)
+{
+ struct qcom_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ if (pcie->suspended) {
+ ret = qcom_pcie_host_init(&pcie->pci->pp);
+ if (ret)
+ return ret;
+
+ pcie->suspended = false;
+ }
+
+ qcom_pcie_icc_update(pcie);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+ { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
+ { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
+ { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
+	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
+ { .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
+ { }
+};
+
+static void qcom_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
+
+static const struct dev_pm_ops qcom_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
+};
+
+static struct platform_driver qcom_pcie_driver = {
+ .probe = qcom_pcie_probe,
+ .driver = {
+ .name = "qcom-pcie",
+ .suppress_bind_attrs = true,
+ .of_match_table = qcom_pcie_match,
+ .pm = &qcom_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+builtin_platform_driver(qcom_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
new file mode 100644
index 0000000000..99d47ae803
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs
+ *
+ * SPEAr13xx PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2010-2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@gmail.com>
+ * Mohit Kumar <mohit.kumar.dhaka@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+struct spear13xx_pcie {
+ struct dw_pcie *pci;
+ void __iomem *app_base;
+ struct phy *phy;
+ struct clk *clk;
+};
+
+struct pcie_app_reg {
+ u32 app_ctrl_0; /* cr0 */
+ u32 app_ctrl_1; /* cr1 */
+ u32 app_status_0; /* cr2 */
+ u32 app_status_1; /* cr3 */
+ u32 msg_status; /* cr4 */
+ u32 msg_payload; /* cr5 */
+ u32 int_sts; /* cr6 */
+ u32 int_clr; /* cr7 */
+ u32 int_mask; /* cr8 */
+ u32 mst_bmisc; /* cr9 */
+ u32 phy_ctrl; /* cr10 */
+ u32 phy_status; /* cr11 */
+ u32 cxpl_debug_info_0; /* cr12 */
+ u32 cxpl_debug_info_1; /* cr13 */
+ u32 ven_msg_ctrl_0; /* cr14 */
+ u32 ven_msg_ctrl_1; /* cr15 */
+ u32 ven_msg_data_0; /* cr16 */
+ u32 ven_msg_data_1; /* cr17 */
+ u32 ven_msi_0; /* cr18 */
+ u32 ven_msi_1; /* cr19 */
+ u32 mst_rmisc; /* cr20 */
+};
+
+/* CR0 ID */
+#define APP_LTSSM_ENABLE_ID 3
+#define DEVICE_TYPE_RC (4 << 25)
+#define MISCTRL_EN_ID 30
+#define REG_TRANSLATION_ENABLE 31
+
+/* CR3 ID */
+#define XMLH_LINK_UP (1 << 6)
+
+/* CR6 */
+#define MSI_CTRL_INT (1 << 26)
+
+#define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev)
+
+static int spear13xx_pcie_start_link(struct dw_pcie *pci)
+{
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
+
+ /* enable ltssm */
+ writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
+ | (1 << APP_LTSSM_ENABLE_ID)
+ | ((u32)1 << REG_TRANSLATION_ENABLE),
+ &app_reg->app_ctrl_0);
+
+ return 0;
+}
+
+static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
+{
+ struct spear13xx_pcie *spear13xx_pcie = arg;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
+ struct dw_pcie *pci = spear13xx_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ unsigned int status;
+
+ status = readl(&app_reg->int_sts);
+
+ if (status & MSI_CTRL_INT) {
+ BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI));
+ dw_handle_msi_irq(pp);
+ }
+
+ writel(status, &app_reg->int_clr);
+
+ return IRQ_HANDLED;
+}
+
+static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
+{
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
+
+ /* Enable MSI interrupt */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ writel(readl(&app_reg->int_mask) |
+ MSI_CTRL_INT, &app_reg->int_mask);
+}
+
+static int spear13xx_pcie_link_up(struct dw_pcie *pci)
+{
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
+
+ if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
+ return 1;
+
+ return 0;
+}
+
+static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
+
+ /*
+	 * This controller supports only a 128-byte read request size, but its
+	 * default value in the capability register is 512 bytes. So force it
+	 * to 128 here.
+ */
+ val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val);
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);
+
+ spear13xx_pcie_enable_interrupts(spear13xx_pcie);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
+ .host_init = spear13xx_pcie_host_init,
+};
+
+static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = spear13xx_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "spear1340-pcie", spear13xx_pcie);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", pp->irq);
+ return ret;
+ }
+
+ pp->ops = &spear13xx_pcie_host_ops;
+ pp->msi_irq[0] = -ENODEV;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = spear13xx_pcie_link_up,
+ .start_link = spear13xx_pcie_start_link,
+};
+
+static int spear13xx_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct spear13xx_pcie *spear13xx_pcie;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
+ if (!spear13xx_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ spear13xx_pcie->pci = pci;
+
+ spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(spear13xx_pcie->phy)) {
+ ret = PTR_ERR(spear13xx_pcie->phy);
+ if (ret == -EPROBE_DEFER)
+ dev_info(dev, "probe deferred\n");
+ else
+ dev_err(dev, "couldn't get pcie-phy\n");
+ return ret;
+ }
+
+ phy_init(spear13xx_pcie->phy);
+
+ spear13xx_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(spear13xx_pcie->clk)) {
+ dev_err(dev, "couldn't get clk for pcie\n");
+ return PTR_ERR(spear13xx_pcie->clk);
+ }
+ ret = clk_prepare_enable(spear13xx_pcie->clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clk for pcie\n");
+ return ret;
+ }
+
+ if (of_property_read_bool(np, "st,pcie-is-gen1"))
+ pci->link_gen = 1;
+
+ platform_set_drvdata(pdev, spear13xx_pcie);
+
+ ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev);
+ if (ret < 0)
+ goto fail_clk;
+
+ return 0;
+
+fail_clk:
+ clk_disable_unprepare(spear13xx_pcie->clk);
+
+ return ret;
+}
+
+static const struct of_device_id spear13xx_pcie_of_match[] = {
+ { .compatible = "st,spear1340-pcie", },
+ {},
+};
+
+static struct platform_driver spear13xx_pcie_driver = {
+ .probe = spear13xx_pcie_probe,
+ .driver = {
+ .name = "spear-pcie",
+ .of_match_table = spear13xx_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(spear13xx_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
new file mode 100644
index 0000000000..55f61914a9
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ACPI quirks for Tegra194 PCIe host controller
+ *
+ * Copyright (C) 2021 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include "pcie-designware.h"
+
+struct tegra194_pcie_ecam {
+ void __iomem *config_base;
+ void __iomem *iatu_base;
+ void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct tegra194_pcie_ecam *pcie_ecam;
+
+ pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+ if (!pcie_ecam)
+ return -ENOMEM;
+
+ pcie_ecam->config_base = cfg->win;
+ pcie_ecam->iatu_base = cfg->win + SZ_256K;
+ pcie_ecam->dbi_base = cfg->win + SZ_512K;
+ cfg->priv = pcie_ecam;
+
+ return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+ u32 val, u32 reg)
+{
+ u32 offset = PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) +
+ PCIE_ATU_VIEWPORT_BASE;
+
+ writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+ PCIE_ATU_LOWER_BASE);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+ PCIE_ATU_UPPER_BASE);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+ PCIE_ATU_LOWER_TARGET);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+ PCIE_ATU_UPPER_TARGET);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2);
+}
+
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+ u32 busdev;
+ int type;
+
+ if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+ return NULL;
+
+ if (bus->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ return pcie_ecam->dbi_base + where;
+ else
+ return NULL;
+ }
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (bus->parent->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ return NULL;
+ } else {
+ type = PCIE_ATU_TYPE_CFG1;
+ }
+
+ program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+ SZ_256K);
+
+ return pcie_ecam->config_base + where;
+}
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+ .init = tegra194_acpi_init,
+ .pci_ops = {
+ .map_bus = tegra194_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
new file mode 100644
index 0000000000..248cd9347e
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -0,0 +1,2514 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for the following SoCs
+ * Tegra194
+ * Tegra234
+ *
+ * Copyright (C) 2019-2022 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include "../../pci.h"
+
+#define TEGRA194_DWC_IP_VER 0x490A
+#define TEGRA234_DWC_IP_VER 0x562A
+
+#define APPL_PINMUX 0x0
+#define APPL_PINMUX_PEX_RST BIT(0)
+#define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2)
+#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3)
+#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4)
+#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5)
+
+#define APPL_CTRL 0x4
+#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6)
+#define APPL_CTRL_LTSSM_EN BIT(7)
+#define APPL_CTRL_HW_HOT_RST_EN BIT(20)
+#define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0)
+#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN 0x2
+
+#define APPL_INTR_EN_L0_0 0x8
+#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
+#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
+#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
+#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15)
+#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
+#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
+#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
+
+#define APPL_INTR_STATUS_L0 0xC
+#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
+#define APPL_INTR_STATUS_L0_INT_INT BIT(8)
+#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15)
+#define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16)
+#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)
+
+#define APPL_INTR_EN_L1_0_0 0x1C
+#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
+#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30)
+
+#define APPL_INTR_STATUS_L1_0_0 0x20
+#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
+#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3)
+#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30)
+
+#define APPL_INTR_STATUS_L1_1 0x2C
+#define APPL_INTR_STATUS_L1_2 0x30
+#define APPL_INTR_STATUS_L1_3 0x34
+#define APPL_INTR_STATUS_L1_6 0x3C
+#define APPL_INTR_STATUS_L1_7 0x40
+#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1)
+
+#define APPL_INTR_EN_L1_8_0 0x44
+#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
+#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_8_INTX_EN BIT(11)
+#define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15)
+
+#define APPL_INTR_STATUS_L1_8_0 0x4C
+#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6)
+#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2)
+#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3)
+
+#define APPL_INTR_STATUS_L1_9 0x54
+#define APPL_INTR_STATUS_L1_10 0x58
+#define APPL_INTR_STATUS_L1_11 0x64
+#define APPL_INTR_STATUS_L1_13 0x74
+#define APPL_INTR_STATUS_L1_14 0x78
+#define APPL_INTR_STATUS_L1_15 0x7C
+#define APPL_INTR_STATUS_L1_17 0x88
+
+#define APPL_INTR_EN_L1_18 0x90
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2)
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
+
+#define APPL_INTR_STATUS_L1_18 0x94
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2)
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
+
+#define APPL_MSI_CTRL_1 0xAC
+
+#define APPL_MSI_CTRL_2 0xB0
+
+#define APPL_LEGACY_INTX 0xB8
+
+#define APPL_LTR_MSG_1 0xC4
+#define LTR_MSG_REQ BIT(15)
+#define LTR_MST_NO_SNOOP_SHIFT 16
+
+#define APPL_LTR_MSG_2 0xC8
+#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3)
+
+#define APPL_LINK_STATUS 0xCC
+#define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0)
+
+#define APPL_DEBUG 0xD0
+#define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21)
+#define APPL_DEBUG_PM_LINKST_IN_L0 0x11
+#define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3)
+#define APPL_DEBUG_LTSSM_STATE_SHIFT 3
+#define LTSSM_STATE_PRE_DETECT 5
+
+#define APPL_RADM_STATUS 0xE4
+#define APPL_PM_XMT_TURNOFF_STATE BIT(0)
+
+#define APPL_DM_TYPE 0x100
+#define APPL_DM_TYPE_MASK GENMASK(3, 0)
+#define APPL_DM_TYPE_RP 0x4
+#define APPL_DM_TYPE_EP 0x0
+
+#define APPL_CFG_BASE_ADDR 0x104
+#define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12)
+
+#define APPL_CFG_IATU_DMA_BASE_ADDR 0x108
+#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18)
+
+#define APPL_CFG_MISC 0x110
+#define APPL_CFG_MISC_SLV_EP_MODE BIT(14)
+#define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10)
+#define APPL_CFG_MISC_ARCACHE_SHIFT 10
+#define APPL_CFG_MISC_ARCACHE_VAL 3
+
+#define APPL_CFG_SLCG_OVERRIDE 0x114
+#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0)
+
+#define APPL_CAR_RESET_OVRD 0x12C
+#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0)
+
+#define IO_BASE_IO_DECODE BIT(0)
+#define IO_BASE_IO_DECODE_BIT8 BIT(8)
+
+#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0)
+#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16)
+
+#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718
+#define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19)
+
+#define N_FTS_VAL 52
+#define FTS_VAL 52
+
+#define GEN3_EQ_CONTROL_OFF 0x8a8
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
+
+#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
+#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
+#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
+#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
+#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+
+#define MSIX_ADDR_MATCH_LOW_OFF 0x940
+#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
+#define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2)
+
+#define MSIX_ADDR_MATCH_HIGH_OFF 0x944
+#define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0)
+
+#define PORT_LOGIC_MSIX_DOORBELL 0x948
+
+#define CAP_SPCIE_CAP_OFF 0x154
+#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0)
+#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8)
+#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8
+
+#define PME_ACK_TIMEOUT 10000
+
+#define LTSSM_TIMEOUT 50000 /* 50ms */
+
+#define GEN3_GEN4_EQ_PRESET_INIT 5
+
+#define GEN1_CORE_CLK_FREQ 62500000
+#define GEN2_CORE_CLK_FREQ 125000000
+#define GEN3_CORE_CLK_FREQ 250000000
+#define GEN4_CORE_CLK_FREQ 500000000
+
+#define LTR_MSG_TIMEOUT (100 * 1000)
+
+#define PERST_DEBOUNCE_TIME (5 * 1000)
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_ENABLED 1
+
+static const unsigned int pcie_gen_freq[] = {
+ GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */
+ GEN1_CORE_CLK_FREQ,
+ GEN2_CORE_CLK_FREQ,
+ GEN3_CORE_CLK_FREQ,
+ GEN4_CORE_CLK_FREQ
+};
+
+struct tegra_pcie_dw_of_data {
+ u32 version;
+ enum dw_pcie_device_mode mode;
+ bool has_msix_doorbell_access_fix;
+ bool has_sbr_reset_fix;
+ bool has_l1ss_exit_fix;
+ bool has_ltr_req_fix;
+ u32 cdm_chk_int_en_bit;
+ u32 gen4_preset_vec;
+ u8 n_fts[2];
+};
+
+struct tegra_pcie_dw {
+ struct device *dev;
+ struct resource *appl_res;
+ struct resource *dbi_res;
+ struct resource *atu_dma_res;
+ void __iomem *appl_base;
+ struct clk *core_clk;
+ struct reset_control *core_apb_rst;
+ struct reset_control *core_rst;
+ struct dw_pcie pci;
+ struct tegra_bpmp *bpmp;
+
+ struct tegra_pcie_dw_of_data *of_data;
+
+ bool supports_clkreq;
+ bool enable_cdm_check;
+ bool enable_srns;
+ bool link_state;
+ bool update_fc_fixup;
+ bool enable_ext_refclk;
+ u8 init_link_width;
+ u32 msi_ctrl_int;
+ u32 num_lanes;
+ u32 cid;
+ u32 cfg_link_cap_l1sub;
+ u32 ras_des_cap;
+ u32 pcie_cap_base;
+ u32 aspm_cmrt;
+ u32 aspm_pwr_on_t;
+ u32 aspm_l0s_enter_lat;
+
+ struct regulator *pex_ctl_supply;
+ struct regulator *slot_ctl_3v3;
+ struct regulator *slot_ctl_12v;
+
+ unsigned int phy_count;
+ struct phy **phys;
+
+ struct dentry *debugfs;
+
+ /* Endpoint mode specific */
+ struct gpio_desc *pex_rst_gpiod;
+ struct gpio_desc *pex_refclk_sel_gpiod;
+ unsigned int pex_rst_irq;
+ int ep_state;
+ long link_status;
+ struct icc_path *icc_path;
+};
+
+static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
+{
+ return container_of(pci, struct tegra_pcie_dw, pci);
+}
+
+static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->appl_base + reg);
+}
+
+static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->appl_base + reg);
+}
+
+struct tegra_pcie_soc {
+ enum dw_pcie_device_mode mode;
+};
+
+static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, speed, width;
+
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+
+ val = width * (PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]) / BITS_PER_BYTE);
+
+ if (icc_set_bw(pcie->icc_path, MBps_to_icc(val), 0))
+ dev_err(pcie->dev, "can't set bw[%u]\n", val);
+
+ if (speed >= ARRAY_SIZE(pcie_gen_freq))
+ speed = 0;
+
+ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
+}
+
+static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 current_link_width;
+ u16 val;
+
+ /*
+	 * NOTE: Since this scenario is uncommon and the link is not stable
+	 * anyway, do not wait to confirm whether the link is really
+	 * transitioning to Gen-2 speed.
+ */
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+ if (val & PCI_EXP_LNKSTA_LBMS) {
+ current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+ if (pcie->init_link_width > current_link_width) {
+ dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL2);
+ val &= ~PCI_EXP_LNKCTL2_TLS;
+ val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL2, val);
+
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL);
+ val |= PCI_EXP_LNKCTL_RL;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL, val);
+ }
+ }
+}
+
+static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ u32 val, status_l0, status_l1;
+ u16 val_w;
+
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+ if (!pcie->of_data->has_sbr_reset_fix &&
+ status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
+ /* SBR & Surprise Link Down WAR */
+ val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+ val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+ appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+ udelay(1);
+ val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+ val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+ appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ }
+ }
+
+ if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
+ appl_writel(pcie,
+ APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
+ APPL_INTR_STATUS_L1_8_0);
+ apply_bad_link_workaround(pp);
+ }
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_w |= PCI_EXP_LNKSTA_LBMS;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA, val_w);
+
+ appl_writel(pcie,
+ APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
+ APPL_INTR_STATUS_L1_8_0);
+
+ val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
+ PCI_EXP_LNKSTA_CLS);
+ }
+ }
+
+ if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
+ dev_info(pci->dev, "CDM check complete\n");
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
+ }
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
+ dev_err(pci->dev, "CDM comparison mismatch\n");
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
+ }
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
+ dev_err(pci->dev, "CDM Logic error\n");
+ val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
+ }
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
+ dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+}
+
+static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+
+ if (test_and_clear_bit(0, &pcie->link_status))
+ dw_pcie_ep_linkup(ep);
+
+ tegra_pcie_icc_set(pcie);
+
+ if (pcie->of_data->has_ltr_req_fix)
+ return IRQ_HANDLED;
+
+ /* If EP doesn't advertise L1SS, just return */
+ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+ if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
+ return IRQ_HANDLED;
+
+ /* Check if BME is set to '1' */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ if (val & PCI_COMMAND_MASTER) {
+ ktime_t timeout;
+
+ /* 110us for both snoop and no-snoop */
+ val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
+ val |= (val << LTR_MST_NO_SNOOP_SHIFT);
+ appl_writel(pcie, val, APPL_LTR_MSG_1);
+
+ /* Send LTR upstream */
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+
+ timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
+ for (;;) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
+ break;
+ if (ktime_after(ktime_get(), timeout))
+ break;
+ usleep_range(1000, 1100);
+ }
+ if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
+ dev_err(pcie->dev, "Failed to send LTR message\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ int spurious = 1;
+ u32 status_l0, status_l1, link_status;
+
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ pex_ep_event_hot_rst_done(pcie);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+ link_status = appl_readl(pcie, APPL_LINK_STATUS);
+ if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ dev_dbg(pcie->dev, "Link is up with Host\n");
+ set_bit(0, &pcie->link_status);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ spurious = 0;
+ }
+
+ if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ return IRQ_WAKE_THREAD;
+
+ spurious = 0;
+ }
+
+ if (spurious) {
+ dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
+ status_l0);
+ appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ /*
+	 * This is an endpoint-mode-specific register that happens to appear
+	 * even when the controller is operating in root port mode, and the
+	 * system hangs when it is accessed while the link is in the ASPM L1
+	 * state. So skip accessing it altogether.
+ */
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
+ *val = 0x00000000;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 val)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ /*
+	 * This is an endpoint-mode-specific register that happens to appear
+	 * even when the controller is operating in root port mode, and the
+	 * system hangs when it is accessed while the link is in the ASPM L1
+	 * state. So skip accessing it altogether.
+ */
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
+ return PCIBIOS_SUCCESSFUL;
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static struct pci_ops tegra_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = tegra_pcie_dw_rd_own_conf,
+ .write = tegra_pcie_dw_wr_own_conf,
+};
+
+#if defined(CONFIG_PCIEASPM)
+static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
+ val &= ~PCI_L1SS_CAP_ASPM_L1_1;
+ dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
+}
+
+static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
+ val &= ~PCI_L1SS_CAP_ASPM_L1_2;
+ dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
+}
+
+static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
+ val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
+ val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+ val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
+ val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_DATA);
+
+ return val;
+}
+
+static int aspm_state_cnt(struct seq_file *s, void *data)
+{
+ struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
+ dev_get_drvdata(s->private);
+ u32 val;
+
+ seq_printf(s, "Tx L0s entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
+
+ seq_printf(s, "Rx L0s entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
+
+ seq_printf(s, "Link L1 entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
+
+ seq_printf(s, "Link L1.1 entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
+
+ seq_printf(s, "Link L1.2 entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
+
+ /* Clear all counters */
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
+ EVENT_COUNTER_ALL_CLEAR);
+
+ /* Re-enable counting */
+ val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+ val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+
+ return 0;
+}
+
+static void init_host_aspm(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+
+ val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
+ pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
+
+ pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
+ PCI_EXT_CAP_ID_VNDR);
+
+ /* Enable ASPM counters */
+ val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+ val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+ dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+
+ /* Program T_cmrt and T_pwr_on values */
+ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+ val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
+ val |= (pcie->aspm_cmrt << 8);
+ val |= (pcie->aspm_pwr_on_t << 19);
+ dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
+
+ /* Program L0s and L1 entrance latencies */
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
+ val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
+ val |= PORT_AFR_ENTER_ASPM;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
+}
+
+static void init_debugfs(struct tegra_pcie_dw *pcie)
+{
+ debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
+ aspm_state_cnt);
+}
+#else
+static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
+static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
+#endif
+
+static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+ u16 val_w;
+
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+ }
+
+ if (pcie->enable_cdm_check) {
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= pcie->of_data->cdm_chk_int_en_bit;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_18);
+ val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
+ val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_18);
+ }
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL);
+ val_w |= PCI_EXP_LNKCTL_LBMIE;
+ dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
+ val_w);
+}
+
+static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+
+ /* Enable legacy interrupt generation */
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_INT_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
+ val |= APPL_INTR_EN_L1_8_INTX_EN;
+ val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
+ val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
+ if (IS_ENABLED(CONFIG_PCIEAER))
+ val |= APPL_INTR_EN_L1_8_AER_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
+}
+
+static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+
+ /* Enable MSI interrupt generation */
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+}
+
+static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ /* Clear interrupt statuses before enabling interrupts */
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+ tegra_pcie_enable_system_interrupts(pp);
+ tegra_pcie_enable_legacy_interrupts(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_pcie_enable_msi_interrupts(pp);
+}
+
+static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, offset, i;
+
+ /* Program init preset */
+ for (i = 0; i < pcie->num_lanes; i++) {
+ val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
+ val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
+ val |= GEN3_GEN4_EQ_PRESET_INIT;
+ val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
+ val |= (GEN3_GEN4_EQ_PRESET_INIT <<
+ CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
+ dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);
+
+ offset = dw_pcie_find_ext_capability(pci,
+ PCI_EXT_CAP_ID_PL_16GT) +
+ PCI_PL_16GT_LE_CTRL;
+ val = dw_pcie_readb_dbi(pci, offset + i);
+ val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
+ val |= GEN3_GEN4_EQ_PRESET_INIT;
+ val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
+ val |= (GEN3_GEN4_EQ_PRESET_INIT <<
+ PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
+ dw_pcie_writeb_dbi(pci, offset + i, val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
+ val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
+ val |= (pcie->of_data->gen4_preset_vec <<
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+}
+
+static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+ u16 val_16;
+
+ pp->bridge->ops = &tegra_pci_ops;
+
+ if (!pcie->pcie_cap_base)
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
+ val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
+ val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
+ dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
+
+ val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
+ val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
+ val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
+ dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);
+
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+
+ /* Enable as 0xFFFF0001 response for CRS */
+ val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
+ val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
+ val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
+ AMBA_ERROR_RESPONSE_CRS_SHIFT);
+ dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
+
+ /* Configure Max lane width from DT */
+ val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_MLW;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
+ dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
+ config_gen3_gen4_eq_presets(pcie);
+
+ init_host_aspm(pcie);
+
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
+
+ if (pcie->update_fc_fixup) {
+ val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+ val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+ dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+ }
+
+ clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
+ return 0;
+}
+
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct dw_pcie_rp *pp = &pci->pp;
+ u32 val, offset, tmp;
+ bool retry = true;
+
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ enable_irq(pcie->pex_rst_irq);
+ return 0;
+ }
+
+retry_link:
+ /* Assert RST */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val &= ~APPL_PINMUX_PEX_RST;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ usleep_range(100, 200);
+
+ /* Enable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ /* De-assert RST */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_PEX_RST;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ msleep(100);
+
+ if (dw_pcie_wait_for_link(pci)) {
+ if (!retry)
+ return 0;
+ /*
+		 * Some endpoints cannot bring the link up if the root port has
+		 * the Data Link Feature (DLF) enabled. Refer to the spec,
+		 * rev 4.0 ver 1.0, sec 3.4.2 & 7.7.4, for more info on Scaled
+		 * Flow Control and DLF. So confirm that this is indeed the
+		 * case here and attempt link-up once again with DLF disabled.
+ */
+ val = appl_readl(pcie, APPL_DEBUG);
+ val &= APPL_DEBUG_LTSSM_STATE_MASK;
+ val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
+ tmp = appl_readl(pcie, APPL_LINK_STATUS);
+ tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
+ if (!(val == 0x11 && !tmp)) {
+ /* Link is down for all good reasons */
+ return 0;
+ }
+
+		dev_info(pci->dev, "Link is down in DLL\n");
+ dev_info(pci->dev, "Trying again with DLFE disabled\n");
+ /* Disable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ reset_control_assert(pcie->core_rst);
+ reset_control_deassert(pcie->core_rst);
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
+ val &= ~PCI_DLF_EXCHANGE_ENABLE;
+ dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
+
+ tegra_pcie_dw_host_init(pp);
+ dw_pcie_setup_rc(pp);
+
+ retry = false;
+ goto retry_link;
+ }
+
+ tegra_pcie_icc_set(pcie);
+
+ tegra_pcie_enable_interrupts(pp);
+
+ return 0;
+}
+
+static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+
+ return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ disable_irq(pcie->pex_rst_irq);
+}
+
+static const struct dw_pcie_ops tegra_dw_pcie_ops = {
+ .link_up = tegra_pcie_dw_link_up,
+ .start_link = tegra_pcie_dw_start_link,
+ .stop_link = tegra_pcie_dw_stop_link,
+};
+
+static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
+ .host_init = tegra_pcie_dw_host_init,
+};
+
+static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
+{
+ unsigned int phy_count = pcie->phy_count;
+
+ while (phy_count--) {
+ phy_power_off(pcie->phys[phy_count]);
+ phy_exit(pcie->phys[phy_count]);
+ }
+}
+
+static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < pcie->phy_count; i++) {
+ ret = phy_init(pcie->phys[i]);
+ if (ret < 0)
+ goto phy_power_off;
+
+ ret = phy_power_on(pcie->phys[i]);
+ if (ret < 0)
+ goto phy_exit;
+ }
+
+ return 0;
+
+phy_power_off:
+ while (i--) {
+ phy_power_off(pcie->phys[i]);
+phy_exit:
+ phy_exit(pcie->phys[i]);
+ }
+
+ return ret;
+}
+
+static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device_node *np = pcie->dev->of_node;
+ int ret;
+
+ pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ if (!pcie->dbi_res) {
+ dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
+ if (ret < 0) {
+ dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
+ &pcie->aspm_pwr_on_t);
+ if (ret < 0)
+ dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
+ ret);
+
+ ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
+ &pcie->aspm_l0s_enter_lat);
+ if (ret < 0)
+ dev_info(pcie->dev,
+ "Failed to read ASPM L0s Entrance latency: %d\n", ret);
+
+ ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_count_strings(np, "phy-names");
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
+ ret);
+ return ret;
+ }
+ pcie->phy_count = ret;
+
+ if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
+ pcie->update_fc_fixup = true;
+
+ /* RP using an external REFCLK is supported only in Tegra234 */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
+ pcie->enable_ext_refclk = true;
+ } else {
+ pcie->enable_ext_refclk =
+ of_property_read_bool(pcie->dev->of_node,
+ "nvidia,enable-ext-refclk");
+ }
+
+ pcie->supports_clkreq =
+ of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
+
+ pcie->enable_cdm_check =
+ of_property_read_bool(np, "snps,enable-cdm-check");
+
+ if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
+ pcie->enable_srns =
+ of_property_read_bool(np, "nvidia,enable-srns");
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* Endpoint mode specific DT entries */
+ pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie->pex_rst_gpiod)) {
+ int err = PTR_ERR(pcie->pex_rst_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get PERST GPIO: %d\n"),
+ err);
+ return err;
+ }
+
+ pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
+ "nvidia,refclk-select",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
+ int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
+ err);
+ pcie->pex_refclk_sel_gpiod = NULL;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+ bool enable)
+{
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
+
+ /*
+ * Controller-5 doesn't need to have its state set by BPMP-FW in
+ * Tegra194
+ */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
+ return 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
+ req.controller_state.pcie_controller = pcie->cid;
+ req.controller_state.enable = enable;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_UPHY;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ return tegra_bpmp_transfer(pcie->bpmp, &msg);
+}
+
+static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ bool enable)
+{
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ if (enable) {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
+ req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
+ } else {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
+ req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_UPHY;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ return tegra_bpmp_transfer(pcie->bpmp, &msg);
+}
+
+static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
+ struct pci_bus *child, *root_bus = NULL;
+ struct pci_dev *pdev;
+
+ /*
+	 * The link doesn't go into the L2 state with some endpoints on Tegra
+	 * if they are not in the D0 state. So make sure that the immediate
+	 * downstream devices are in D0 before sending PME_TurnOff to put the
+	 * link into L2.
+	 * This is as per PCI Express Base r4.0 v1.0, September 27 2017,
+	 * 5.2 Link State Power Management (page 428).
+ */
+
+ list_for_each_entry(child, &pp->bridge->bus->children, node) {
+ /* Bring downstream devices to D0 if they are not already in */
+ if (child->parent == pp->bridge->bus) {
+ root_bus = child;
+ break;
+ }
+ }
+
+ if (!root_bus) {
+ dev_err(pcie->dev, "Failed to find downstream devices\n");
+ return;
+ }
+
+ list_for_each_entry(pdev, &root_bus->devices, bus_list) {
+ if (PCI_SLOT(pdev->devfn) == 0) {
+ if (pci_set_power_state(pdev, PCI_D0))
+ dev_err(pcie->dev,
+ "Failed to transition %s to D0 state\n",
+ dev_name(&pdev->dev));
+ }
+ }
+}
+
+static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+ pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
+ if (IS_ERR(pcie->slot_ctl_3v3)) {
+ if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
+ return PTR_ERR(pcie->slot_ctl_3v3);
+
+ pcie->slot_ctl_3v3 = NULL;
+ }
+
+ pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
+ if (IS_ERR(pcie->slot_ctl_12v)) {
+ if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
+ return PTR_ERR(pcie->slot_ctl_12v);
+
+ pcie->slot_ctl_12v = NULL;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+ int ret;
+
+ if (pcie->slot_ctl_3v3) {
+ ret = regulator_enable(pcie->slot_ctl_3v3);
+ if (ret < 0) {
+ dev_err(pcie->dev,
+ "Failed to enable 3.3V slot supply: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pcie->slot_ctl_12v) {
+ ret = regulator_enable(pcie->slot_ctl_12v);
+ if (ret < 0) {
+ dev_err(pcie->dev,
+ "Failed to enable 12V slot supply: %d\n", ret);
+ goto fail_12v_enable;
+ }
+ }
+
+ /*
+ * According to PCI Express Card Electromechanical Specification
+ * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
+ * should be a minimum of 100ms.
+ */
+ if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
+ msleep(100);
+
+ return 0;
+
+fail_12v_enable:
+ if (pcie->slot_ctl_3v3)
+ regulator_disable(pcie->slot_ctl_3v3);
+ return ret;
+}
+
+static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+ if (pcie->slot_ctl_12v)
+ regulator_disable(pcie->slot_ctl_12v);
+ if (pcie->slot_ctl_3v3)
+ regulator_disable(pcie->slot_ctl_3v3);
+}
+
+static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
+ bool en_hw_hot_rst)
+{
+ int ret;
+ u32 val;
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev,
+ "Failed to enable controller %u: %d\n", pcie->cid, ret);
+ return ret;
+ }
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
+ goto fail_pll_init;
+ }
+ }
+
+ ret = tegra_pcie_enable_slot_regulators(pcie);
+ if (ret < 0)
+ goto fail_slot_reg_en;
+
+ ret = regulator_enable(pcie->pex_ctl_supply);
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
+ goto fail_reg_en;
+ }
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
+ goto fail_core_clk;
+ }
+
+ ret = reset_control_deassert(pcie->core_apb_rst);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
+ ret);
+ goto fail_core_apb_rst;
+ }
+
+ if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
+ /* Enable HW_HOT_RST mode */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
+
+ ret = tegra_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
+ goto fail_phy;
+ }
+
+ /* Update CFG base address */
+ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+ APPL_CFG_BASE_ADDR);
+
+ /* Configure this core for RP mode operation */
+ appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);
+
+ appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);
+
+ val = appl_readl(pcie, APPL_CFG_MISC);
+ val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+ appl_writel(pcie, val, APPL_CFG_MISC);
+
+ if (pcie->enable_srns || pcie->enable_ext_refclk) {
+		/*
+		 * When the Tegra PCIe RP is using an external clock, it cannot
+		 * supply the same clock to its downstream hierarchy. Hence, gate
+		 * the PCIe RP REFCLK output pads when the RP and EP are using
+		 * separate clocks or the RP is using an external REFCLK.
+		 */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+ }
+
+ if (!pcie->supports_clkreq) {
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+ }
+
+ /* Update iATU_DMA base address */
+ appl_writel(pcie,
+ pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+ APPL_CFG_IATU_DMA_BASE_ADDR);
+
+ reset_control_deassert(pcie->core_rst);
+
+ return ret;
+
+fail_phy:
+ reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+ clk_disable_unprepare(pcie->core_clk);
+fail_core_clk:
+ regulator_disable(pcie->pex_ctl_supply);
+fail_reg_en:
+ tegra_pcie_disable_slot_regulators(pcie);
+fail_slot_reg_en:
+ if (pcie->enable_ext_refclk)
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
+ tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+
+ return ret;
+}
+
+static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
+{
+ int ret;
+
+ ret = reset_control_assert(pcie->core_rst);
+ if (ret)
+ dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);
+
+ tegra_pcie_disable_phy(pcie);
+
+ ret = reset_control_assert(pcie->core_apb_rst);
+ if (ret)
+ dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
+
+ clk_disable_unprepare(pcie->core_clk);
+
+ ret = regulator_disable(pcie->pex_ctl_supply);
+ if (ret)
+ dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
+
+ tegra_pcie_disable_slot_regulators(pcie);
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
+ }
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
+ pcie->cid, ret);
+}
+
+static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ int ret;
+
+ ret = tegra_pcie_config_controller(pcie, false);
+ if (ret < 0)
+ return ret;
+
+ pp->ops = &tegra_pcie_dw_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
+ goto fail_host_init;
+ }
+
+ return 0;
+
+fail_host_init:
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
+}
+
+static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ if (!tegra_pcie_dw_link_up(&pcie->pci))
+ return 0;
+
+ val = appl_readl(pcie, APPL_RADM_STATUS);
+ val |= APPL_PM_XMT_TURNOFF_STATE;
+ appl_writel(pcie, val, APPL_RADM_STATUS);
+
+ return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
+ val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
+ 1, PME_ACK_TIMEOUT);
+}
+
+static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
+{
+ u32 data;
+ int err;
+
+ if (!tegra_pcie_dw_link_up(&pcie->pci)) {
+ dev_dbg(pcie->dev, "PCIe link is not up...!\n");
+ return;
+ }
+
+	/*
+	 * The PCIe controller exits L2 only if a reset is applied, so the
+	 * controller doesn't handle interrupts. But in cases where L2 entry
+	 * fails, PERST# is asserted, which can trigger a surprise link down
+	 * AER. However, this function is called from suspend_noirq(), so the
+	 * AER interrupt will not be processed.
+	 * Disable all interrupts to avoid such a scenario.
+	 */
+ appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
+
+ if (tegra_pcie_try_link_l2(pcie)) {
+ dev_info(pcie->dev, "Link didn't transition to L2 state\n");
+		/*
+		 * The TX lane clock frequency resets to Gen1 only if the link
+		 * is in the L2 or detect state. So assert PEX_RST to the
+		 * endpoint to force the RP into the detect state.
+		 */
+ data = appl_readl(pcie, APPL_PINMUX);
+ data &= ~APPL_PINMUX_PEX_RST;
+ appl_writel(pcie, data, APPL_PINMUX);
+
+		/*
+		 * Some cards do not go to the detect state even after
+		 * de-asserting PERST#. So, disable the LTSSM to bring the link
+		 * to the detect state.
+		 */
+ data = readl(pcie->appl_base + APPL_CTRL);
+ data &= ~APPL_CTRL_LTSSM_EN;
+ writel(data, pcie->appl_base + APPL_CTRL);
+
+ err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
+ data,
+ ((data &
+ APPL_DEBUG_LTSSM_STATE_MASK) >>
+ APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+ LTSSM_STATE_PRE_DETECT,
+ 1, LTSSM_TIMEOUT);
+ if (err)
+ dev_info(pcie->dev, "Link didn't go to detect state\n");
+ }
+	/*
+	 * DBI registers may not be accessible after this, as PLL-E may be
+	 * down depending on how CLKREQ is pulled by the endpoint.
+	 */
+ data = appl_readl(pcie, APPL_PINMUX);
+ data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
+ /* Cut REFCLK to slot */
+ data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, data, APPL_PINMUX);
+}
+
+static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
+{
+ tegra_pcie_downstream_dev_to_D0(pcie);
+ dw_pcie_host_deinit(&pcie->pci.pp);
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+}
+
+static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
+{
+ struct device *dev = pcie->dev;
+ char *name;
+ int ret;
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ ret);
+ goto fail_pm_get_sync;
+ }
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
+ goto fail_pm_get_sync;
+ }
+
+ ret = tegra_pcie_init_controller(pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize controller: %d\n", ret);
+ goto fail_pm_get_sync;
+ }
+
+ pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
+ if (!pcie->link_state) {
+ ret = -ENOMEDIUM;
+ goto fail_host_init;
+ }
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name) {
+ ret = -ENOMEM;
+ goto fail_host_init;
+ }
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+ init_debugfs(pcie);
+
+ return ret;
+
+fail_host_init:
+ tegra_pcie_deinit_controller(pcie);
+fail_pm_get_sync:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_DISABLED)
+ return;
+
+ /* Disable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
+ APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+ LTSSM_STATE_PRE_DETECT,
+ 1, LTSSM_TIMEOUT);
+ if (ret)
+ dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
+
+ reset_control_assert(pcie->core_rst);
+
+ tegra_pcie_disable_phy(pcie);
+
+ reset_control_assert(pcie->core_apb_rst);
+
+ clk_disable_unprepare(pcie->core_clk);
+
+ pm_runtime_put_sync(pcie->dev);
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n",
+ ret);
+ }
+
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+ dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
+}
+
+static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+ struct device *dev = pcie->dev;
+ u32 val;
+ int ret;
+ u16 val_16;
+
+ if (pcie->ep_state == EP_STATE_ENABLED)
+ return;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ ret);
+ return;
+ }
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
+ pcie->cid, ret);
+ goto fail_set_ctrl_state;
+ }
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
+ ret);
+ goto fail_pll_init;
+ }
+ }
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable core clock: %d\n", ret);
+ goto fail_core_clk_enable;
+ }
+
+ ret = reset_control_deassert(pcie->core_apb_rst);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
+ goto fail_core_apb_rst;
+ }
+
+ ret = tegra_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable PHY: %d\n", ret);
+ goto fail_phy;
+ }
+
+ /* Clear any stale interrupt statuses */
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+ /* configure this core for EP mode operation */
+ val = appl_readl(pcie, APPL_DM_TYPE);
+ val &= ~APPL_DM_TYPE_MASK;
+ val |= APPL_DM_TYPE_EP;
+ appl_writel(pcie, val, APPL_DM_TYPE);
+
+ appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_SYS_PRE_DET_STATE;
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ val = appl_readl(pcie, APPL_CFG_MISC);
+ val |= APPL_CFG_MISC_SLV_EP_MODE;
+ val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+ appl_writel(pcie, val, APPL_CFG_MISC);
+
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+ APPL_CFG_BASE_ADDR);
+
+ appl_writel(pcie, pcie->atu_dma_res->start &
+ APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+ APPL_CFG_IATU_DMA_BASE_ADDR);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+ val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
+ val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+
+ reset_control_deassert(pcie->core_rst);
+
+ if (pcie->update_fc_fixup) {
+ val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+ val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+ dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+ }
+
+ config_gen3_gen4_eq_presets(pcie);
+
+ init_host_aspm(pcie);
+
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
+
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
+ clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
+ val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
+ val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
+ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
+
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto fail_init_complete;
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
+ /* Program the private control to allow sending LTR upstream */
+ if (pcie->of_data->has_ltr_req_fix) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+ }
+
+ /* Enable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ pcie->ep_state = EP_STATE_ENABLED;
+ dev_dbg(dev, "Initialization of endpoint is completed\n");
+
+ return;
+
+fail_init_complete:
+ reset_control_assert(pcie->core_rst);
+ tegra_pcie_disable_phy(pcie);
+fail_phy:
+ reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+ clk_disable_unprepare(pcie->core_clk);
+fail_core_clk_enable:
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
+ tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+fail_set_ctrl_state:
+ pm_runtime_put_sync(dev);
+}
+
+static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+
+ if (gpiod_get_value(pcie->pex_rst_gpiod))
+ pex_ep_event_pex_rst_assert(pcie);
+ else
+ pex_ep_event_pex_rst_deassert(pcie);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ /* Tegra194 supports only INTA */
+ if (irq > 1)
+ return -EINVAL;
+
+ appl_writel(pcie, 1, APPL_LEGACY_INTX);
+ usleep_range(1000, 2000);
+ appl_writel(pcie, 0, APPL_LEGACY_INTX);
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ if (unlikely(irq > 31))
+ return -EINVAL;
+
+ appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+
+ writel(irq, ep->msi_mem);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSI:
+ return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSIX:
+ return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
+
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features tegra_pcie_epc_features = {
+ .linkup_notifier = true,
+ .core_init_notifier = true,
+ .msi_capable = false,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[0] = SZ_1M,
+};
+
+static const struct pci_epc_features*
+tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &tegra_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .raise_irq = tegra_pcie_ep_raise_irq,
+ .get_features = tegra_pcie_ep_get_features,
+};
+
+static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pcie->dev;
+ struct dw_pcie_ep *ep;
+ char *name;
+ int ret;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ ep->page_size = SZ_64K;
+
+ ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = gpiod_to_irq(pcie->pex_rst_gpiod);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
+ return ret;
+ }
+ pcie->pex_rst_irq = (unsigned int)ret;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
+ pcie->cid);
+ if (!name) {
+ dev_err(dev, "Failed to create PERST IRQ string\n");
+ return -ENOMEM;
+ }
+
+ irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+
+ ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
+ tegra_pcie_ep_pex_rst_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ name, (void *)pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
+ ret);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_dw_probe(struct platform_device *pdev)
+{
+ const struct tegra_pcie_dw_of_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *atu_dma_res;
+ struct tegra_pcie_dw *pcie;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ struct phy **phys;
+ char *name;
+ int ret;
+ u32 i;
+
+ data = of_device_get_match_data(dev);
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = &pdev->dev;
+ pci->ops = &tegra_dw_pcie_ops;
+ pcie->dev = &pdev->dev;
+ pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
+ pci->n_fts[0] = pcie->of_data->n_fts[0];
+ pci->n_fts[1] = pcie->of_data->n_fts[1];
+ pp = &pci->pp;
+ pp->num_vectors = MAX_MSI_IRQS;
+
+ ret = tegra_pcie_dw_parse_dt(pcie);
+ if (ret < 0) {
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to parse device tree: %d\n"),
+ ret);
+ return ret;
+ }
+
+ ret = tegra_pcie_get_slot_regulators(pcie);
+ if (ret < 0) {
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to get slot regulators: %d\n"),
+ ret);
+ return ret;
+ }
+
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
+
+ pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
+ if (IS_ERR(pcie->pex_ctl_supply)) {
+ ret = PTR_ERR(pcie->pex_ctl_supply);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get regulator: %ld\n",
+ PTR_ERR(pcie->pex_ctl_supply));
+ return ret;
+ }
+
+ pcie->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(pcie->core_clk)) {
+ dev_err(dev, "Failed to get core clock: %ld\n",
+ PTR_ERR(pcie->core_clk));
+ return PTR_ERR(pcie->core_clk);
+ }
+
+ pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "appl");
+ if (!pcie->appl_res) {
+ dev_err(dev, "Failed to find \"appl\" region\n");
+ return -ENODEV;
+ }
+
+ pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
+ if (IS_ERR(pcie->appl_base))
+ return PTR_ERR(pcie->appl_base);
+
+ pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(pcie->core_apb_rst)) {
+ dev_err(dev, "Failed to get APB reset: %ld\n",
+ PTR_ERR(pcie->core_apb_rst));
+ return PTR_ERR(pcie->core_apb_rst);
+ }
+
+ phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
+ if (!phys)
+ return -ENOMEM;
+
+ for (i = 0; i < pcie->phy_count; i++) {
+ name = kasprintf(GFP_KERNEL, "p2u-%u", i);
+ if (!name) {
+ dev_err(dev, "Failed to create P2U string\n");
+ return -ENOMEM;
+ }
+ phys[i] = devm_phy_get(dev, name);
+ kfree(name);
+ if (IS_ERR(phys[i])) {
+ ret = PTR_ERR(phys[i]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get PHY: %d\n", ret);
+ return ret;
+ }
+ }
+
+ pcie->phys = phys;
+
+ atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "atu_dma");
+ if (!atu_dma_res) {
+ dev_err(dev, "Failed to find \"atu_dma\" region\n");
+ return -ENODEV;
+ }
+ pcie->atu_dma_res = atu_dma_res;
+
+ pci->atu_size = resource_size(atu_dma_res);
+ pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
+ if (IS_ERR(pci->atu_base))
+ return PTR_ERR(pci->atu_base);
+
+ pcie->core_rst = devm_reset_control_get(dev, "core");
+ if (IS_ERR(pcie->core_rst)) {
+ dev_err(dev, "Failed to get core reset: %ld\n",
+ PTR_ERR(pcie->core_rst));
+ return PTR_ERR(pcie->core_rst);
+ }
+
+ pp->irq = platform_get_irq_byname(pdev, "intr");
+ if (pp->irq < 0)
+ return pp->irq;
+
+ pcie->bpmp = tegra_bpmp_get(dev);
+ if (IS_ERR(pcie->bpmp))
+ return PTR_ERR(pcie->bpmp);
+
+ platform_set_drvdata(pdev, pcie);
+
+ pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
+ ret = PTR_ERR_OR_ZERO(pcie->icc_path);
+ if (ret) {
+ tegra_bpmp_put(pcie->bpmp);
+ dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
+ return ret;
+ }
+
+ switch (pcie->of_data->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
+ IRQF_SHARED, "tegra-pcie-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_rp(pcie);
+ if (ret && ret != -ENOMEDIUM)
+ goto fail;
+ else
+ return 0;
+ break;
+
+ case DW_PCIE_EP_TYPE:
+ ret = devm_request_threaded_irq(dev, pp->irq,
+ tegra_pcie_ep_hard_irq,
+ tegra_pcie_ep_irq_thread,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "tegra-pcie-ep-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_ep(pcie, pdev);
+ if (ret < 0)
+ goto fail;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PCIe device type %d\n",
+ pcie->of_data->mode);
+ }
+
+fail:
+ tegra_bpmp_put(pcie->bpmp);
+ return ret;
+}
+
+static void tegra_pcie_dw_remove(struct platform_device *pdev)
+{
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return;
+
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_deinit_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
+
+ pm_runtime_disable(pcie->dev);
+ tegra_bpmp_put(pcie->bpmp);
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
+}
+
+static int tegra_pcie_dw_suspend_late(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ u32 val;
+
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n");
+ return -EPERM;
+ }
+
+ if (!pcie->link_state)
+ return 0;
+
+ /* Enable HW_HOT_RST mode */
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_dw_suspend_noirq(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+
+ if (!pcie->link_state)
+ return 0;
+
+ tegra_pcie_downstream_dev_to_D0(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+
+ return 0;
+}
+
+static int tegra_pcie_dw_resume_noirq(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pcie->link_state)
+ return 0;
+
+ ret = tegra_pcie_config_controller(pcie, true);
+ if (ret < 0)
+ return ret;
+
+ ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init host: %d\n", ret);
+ goto fail_host_init;
+ }
+
+ dw_pcie_setup_rc(&pcie->pci.pp);
+
+ ret = tegra_pcie_dw_start_link(&pcie->pci);
+ if (ret < 0)
+ goto fail_host_init;
+
+ return 0;
+
+fail_host_init:
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
+}
+
+static int tegra_pcie_dw_resume_early(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ u32 val;
+
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ dev_err(dev, "Suspend is not supported in EP mode");
+ return -ENOTSUPP;
+ }
+
+ if (!pcie->link_state)
+ return 0;
+
+ /* Disable HW_HOT_RST mode */
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
+ val &= ~APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
+
+ return 0;
+}
+
+static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
+{
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return;
+
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_downstream_dev_to_D0(pcie);
+
+ disable_irq(pcie->pci.pp.irq);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ disable_irq(pcie->pci.pp.msi_irq[0]);
+
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
+}
+
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_RC_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_EP_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
+ .mode = DW_PCIE_RC_TYPE,
+ .has_msix_doorbell_access_fix = true,
+ .has_sbr_reset_fix = true,
+ .has_l1ss_exit_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
+ .mode = DW_PCIE_EP_TYPE,
+ .has_l1ss_exit_fix = true,
+ .has_ltr_req_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
+};
+
+static const struct of_device_id tegra_pcie_dw_of_match[] = {
+ {
+ .compatible = "nvidia,tegra194-pcie",
+ .data = &tegra194_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra194-pcie-ep",
+ .data = &tegra194_pcie_dw_ep_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie",
+ .data = &tegra234_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie-ep",
+ .data = &tegra234_pcie_dw_ep_of_data,
+ },
+ {}
+};
+
+static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
+ .suspend_late = tegra_pcie_dw_suspend_late,
+ .suspend_noirq = tegra_pcie_dw_suspend_noirq,
+ .resume_noirq = tegra_pcie_dw_resume_noirq,
+ .resume_early = tegra_pcie_dw_resume_early,
+};
+
+static struct platform_driver tegra_pcie_dw_driver = {
+ .probe = tegra_pcie_dw_probe,
+ .remove_new = tegra_pcie_dw_remove,
+ .shutdown = tegra_pcie_dw_shutdown,
+ .driver = {
+ .name = "tegra194-pcie",
+ .pm = &tegra_pcie_dw_pm_ops,
+ .of_match_table = tegra_pcie_dw_of_match,
+ },
+};
+module_platform_driver(tegra_pcie_dw_driver);
+
+MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
+
+MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
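A minimal sketch, assuming the struct tegra_pcie_dw layout and the APPL_* / LTSSM_* definitions from earlier in this file, of the LTSSM polling pattern that pex_ep_event_pex_rst_assert() and tegra_pcie_dw_pme_turnoff() both rely on; the helper name wait_for_ltssm_pre_detect() is illustrative and not part of the driver:

#include <linux/iopoll.h>

/*
 * Poll APPL_DEBUG until the LTSSM reports the Pre-Detect state, checking
 * every 1 us and giving up after LTSSM_TIMEOUT us. readl_poll_timeout()
 * returns 0 on success and -ETIMEDOUT if the condition never became true.
 */
static int wait_for_ltssm_pre_detect(struct tegra_pcie_dw *pcie)
{
	u32 val;

	return readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
				  ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
				   APPL_DEBUG_LTSSM_STATE_SHIFT) ==
				  LTSSM_STATE_PRE_DETECT,
				  1, LTSSM_TIMEOUT);
}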
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
new file mode 100644
index 0000000000..cba3c88fcf
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe endpoint controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/* Link Glue registers */
+#define PCL_RSTCTRL0 0x0010
+#define PCL_RSTCTRL_AXI_REG BIT(3)
+#define PCL_RSTCTRL_AXI_SLAVE BIT(2)
+#define PCL_RSTCTRL_AXI_MASTER BIT(1)
+#define PCL_RSTCTRL_PIPE3 BIT(0)
+
+#define PCL_RSTCTRL1 0x0020
+#define PCL_RSTCTRL_PERST BIT(0)
+
+#define PCL_RSTCTRL2 0x0024
+#define PCL_RSTCTRL_PHY_RESET BIT(0)
+
+#define PCL_PINCTRL0 0x002c
+#define PCL_PERST_PLDN_REGEN BIT(12)
+#define PCL_PERST_NOE_REGEN BIT(11)
+#define PCL_PERST_OUT_REGEN BIT(8)
+#define PCL_PERST_PLDN_REGVAL BIT(4)
+#define PCL_PERST_NOE_REGVAL BIT(3)
+#define PCL_PERST_OUT_REGVAL BIT(0)
+
+#define PCL_PIPEMON 0x0044
+#define PCL_PCLK_ALIVE BIT(15)
+
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
+#define PCL_APP_CLK_CTRL 0x8004
+#define PCL_APP_CLK_REQ BIT(0)
+
+#define PCL_APP_READY_CTRL 0x8008
+#define PCL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCL_APP_MSI0 0x8040
+#define PCL_APP_VEN_MSI_TC_MASK GENMASK(10, 8)
+#define PCL_APP_VEN_MSI_VECTOR_MASK GENMASK(4, 0)
+
+#define PCL_APP_MSI1 0x8044
+#define PCL_APP_MSI_REQ BIT(0)
+
+#define PCL_APP_INTX 0x8074
+#define PCL_APP_INTX_SYS_INT BIT(0)
+
+#define PCL_APP_PM0 0x8078
+#define PCL_SYS_AUX_PWR_DET BIT(8)
+
+/* assertion time of INTx in usec */
+#define PCL_INTX_WIDTH_USEC 30
+
+struct uniphier_pcie_ep_priv {
+ void __iomem *base;
+ struct dw_pcie pci;
+ struct clk *clk, *clk_gio;
+ struct reset_control *rst, *rst_gio;
+ struct phy *phy;
+ const struct uniphier_pcie_ep_soc_data *data;
+};
+
+struct uniphier_pcie_ep_soc_data {
+ bool has_gio;
+ void (*init)(struct uniphier_pcie_ep_priv *priv);
+ int (*wait)(struct uniphier_pcie_ep_priv *priv);
+ const struct pci_epc_features features;
+};
+
+#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_ep_priv *priv,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_APP_READY_CTRL);
+ if (enable)
+ val |= PCL_APP_LTSSM_ENABLE;
+ else
+ val &= ~PCL_APP_LTSSM_ENABLE;
+ writel(val, priv->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv,
+ bool assert)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_RSTCTRL2);
+ if (assert)
+ val |= PCL_RSTCTRL_PHY_RESET;
+ else
+ val &= ~PCL_RSTCTRL_PHY_RESET;
+ writel(val, priv->base + PCL_RSTCTRL2);
+}
+
+static void uniphier_pcie_pro5_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 val;
+
+ /* set EP mode */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
+ /* clock request */
+ val = readl(priv->base + PCL_APP_CLK_CTRL);
+ val &= ~PCL_APP_CLK_REQ;
+ writel(val, priv->base + PCL_APP_CLK_CTRL);
+
+ /* deassert PIPE3 and AXI reset */
+ val = readl(priv->base + PCL_RSTCTRL0);
+ val |= PCL_RSTCTRL_AXI_REG | PCL_RSTCTRL_AXI_SLAVE
+ | PCL_RSTCTRL_AXI_MASTER | PCL_RSTCTRL_PIPE3;
+ writel(val, priv->base + PCL_RSTCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
+ msleep(100);
+}
+
+static void uniphier_pcie_nx1_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 val;
+
+ /* set EP mode */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
+ /* use auxiliary power detection */
+ val = readl(priv->base + PCL_APP_PM0);
+ val |= PCL_SYS_AUX_PWR_DET;
+ writel(val, priv->base + PCL_APP_PM0);
+
+ /* assert PERST# */
+ val = readl(priv->base + PCL_PINCTRL0);
+ val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+ | PCL_PERST_PLDN_REGVAL);
+ val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+ | PCL_PERST_PLDN_REGEN;
+ writel(val, priv->base + PCL_PINCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
+ usleep_range(100000, 200000);
+
+ /* deassert PERST# */
+ val = readl(priv->base + PCL_PINCTRL0);
+ val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+ writel(val, priv->base + PCL_PINCTRL0);
+}
+
+static int uniphier_pcie_nx1_wait_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 status;
+ int ret;
+
+ /* wait PIPE clock */
+ ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+ status & PCL_PCLK_ALIVE, 100000, 1000000);
+ if (ret) {
+ dev_err(priv->pci.dev,
+ "Failed to initialize controller in EP mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, true);
+
+ return 0;
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+}
+
+static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+	/*
+	 * This generates a pulse signal to send INTx to the RC, so it should
+	 * be cleared as soon as possible. This sequence is protected by the
+	 * mutex in pci_epc_raise_irq().
+	 */
+ /* assert INTx */
+ val = readl(priv->base + PCL_APP_INTX);
+ val |= PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ udelay(PCL_INTX_WIDTH_USEC);
+
+ /* deassert INTx */
+ val &= ~PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
+ u8 func_no, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ val = FIELD_PREP(PCL_APP_VEN_MSI_TC_MASK, func_no)
+ | FIELD_PREP(PCL_APP_VEN_MSI_VECTOR_MASK, interrupt_num - 1);
+ writel(val, priv->base + PCL_APP_MSI0);
+
+ val = readl(priv->base + PCL_APP_MSI1);
+ val |= PCL_APP_MSI_REQ;
+ writel(val, priv->base + PCL_APP_MSI1);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return uniphier_pcie_ep_raise_legacy_irq(ep);
+ case PCI_EPC_IRQ_MSI:
+ return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
+ interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type (%d)\n", type);
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features*
+uniphier_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ return &priv->data->features;
+}
+
+static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
+ .ep_init = uniphier_pcie_ep_init,
+ .raise_irq = uniphier_pcie_ep_raise_irq,
+ .get_features = uniphier_pcie_get_features,
+};
+
+static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk_gio);
+ if (ret)
+ goto out_clk_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ if (priv->data->init)
+ priv->data->init(priv);
+
+ uniphier_pcie_phy_reset(priv, true);
+
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto out_rst_gio_assert;
+
+ uniphier_pcie_phy_reset(priv, false);
+
+ if (priv->data->wait) {
+ ret = priv->data->wait(priv);
+ if (ret)
+ goto out_phy_exit;
+ }
+
+ return 0;
+
+out_phy_exit:
+ phy_exit(priv->phy);
+out_rst_gio_assert:
+ reset_control_assert(priv->rst_gio);
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_gio);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = uniphier_pcie_start_link,
+ .stop_link = uniphier_pcie_stop_link,
+};
+
+static int uniphier_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_pcie_ep_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data))
+ return -EINVAL;
+
+ priv->pci.dev = dev;
+ priv->pci.ops = &dw_pcie_ops;
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ if (priv->data->has_gio) {
+ priv->clk_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_gio))
+ return PTR_ERR(priv->clk_gio);
+
+ priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_gio))
+ return PTR_ERR(priv->rst_gio);
+ }
+
+ priv->clk = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ priv->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ dev_err(dev, "Failed to get phy (%d)\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = uniphier_pcie_ep_enable(priv);
+ if (ret)
+ return ret;
+
+ priv->pci.ep.ops = &uniphier_pcie_ep_ops;
+ return dw_pcie_ep_init(&priv->pci.ep);
+}
+
+static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
+ .has_gio = true,
+ .init = uniphier_pcie_pro5_init_ep,
+ .wait = NULL,
+ .features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 16,
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .reserved_bar = BIT(BAR_4),
+ },
+};
+
+static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
+ .has_gio = false,
+ .init = uniphier_pcie_nx1_init_ep,
+ .wait = uniphier_pcie_nx1_wait_ep,
+ .features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 12,
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ },
+};
+
+static const struct of_device_id uniphier_pcie_ep_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro5-pcie-ep",
+ .data = &uniphier_pro5_data,
+ },
+ {
+ .compatible = "socionext,uniphier-nx1-pcie-ep",
+ .data = &uniphier_nx1_data,
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver uniphier_pcie_ep_driver = {
+ .probe = uniphier_pcie_ep_probe,
+ .driver = {
+ .name = "uniphier-pcie-ep",
+ .of_match_table = uniphier_pcie_ep_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(uniphier_pcie_ep_driver);
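A minimal sketch, assuming the PCL_APP_MSI0 field masks defined at the top of this file, of how uniphier_pcie_ep_raise_msi_irq() packs func_no and the zero-based vector number with FIELD_PREP(); the helper name is illustrative only:

#include <linux/bitfield.h>

/* Build the PCL_APP_MSI0 value: func_no in bits 10:8, vector in bits 4:0 */
static u32 uniphier_pack_msi0(u8 func_no, u16 interrupt_num)
{
	return FIELD_PREP(PCL_APP_VEN_MSI_TC_MASK, func_no) |
	       FIELD_PREP(PCL_APP_VEN_MSI_VECTOR_MASK, interrupt_num - 1);
}

For example, func_no 0 and interrupt_num 3 give 0x2; the driver then latches the request by setting PCL_APP_MSI_REQ in PCL_APP_MSI1.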
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
new file mode 100644
index 0000000000..48c3eba817
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define PCL_PINCTRL0 0x002c
+#define PCL_PERST_PLDN_REGEN BIT(12)
+#define PCL_PERST_NOE_REGEN BIT(11)
+#define PCL_PERST_OUT_REGEN BIT(8)
+#define PCL_PERST_PLDN_REGVAL BIT(4)
+#define PCL_PERST_NOE_REGVAL BIT(3)
+#define PCL_PERST_OUT_REGVAL BIT(0)
+
+#define PCL_PIPEMON 0x0044
+#define PCL_PCLK_ALIVE BIT(15)
+
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
+#define PCL_APP_READY_CTRL 0x8008
+#define PCL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCL_APP_PM0 0x8078
+#define PCL_SYS_AUX_PWR_DET BIT(8)
+
+#define PCL_RCV_INT 0x8108
+#define PCL_RCV_INT_ALL_ENABLE GENMASK(20, 17)
+#define PCL_CFG_BW_MGT_STATUS BIT(4)
+#define PCL_CFG_LINK_AUTO_BW_STATUS BIT(3)
+#define PCL_CFG_AER_RC_ERR_MSI_STATUS BIT(2)
+#define PCL_CFG_PME_MSI_STATUS BIT(1)
+
+#define PCL_RCV_INTX 0x810c
+#define PCL_RCV_INTX_ALL_ENABLE GENMASK(19, 16)
+#define PCL_RCV_INTX_ALL_MASK GENMASK(11, 8)
+#define PCL_RCV_INTX_MASK_SHIFT 8
+#define PCL_RCV_INTX_ALL_STATUS GENMASK(3, 0)
+#define PCL_RCV_INTX_STATUS_SHIFT 0
+
+#define PCL_STATUS_LINK 0x8140
+#define PCL_RDLH_LINK_UP BIT(1)
+#define PCL_XMLH_LINK_UP BIT(0)
+
+struct uniphier_pcie {
+ struct dw_pcie pci;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct irq_domain *legacy_irq_domain;
+};
+
+#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie *pcie,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(pcie->base + PCL_APP_READY_CTRL);
+ if (enable)
+ val |= PCL_APP_LTSSM_ENABLE;
+ else
+ val &= ~PCL_APP_LTSSM_ENABLE;
+ writel(val, pcie->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_init_rc(struct uniphier_pcie *pcie)
+{
+ u32 val;
+
+ /* set RC MODE */
+ val = readl(pcie->base + PCL_MODE);
+ val |= PCL_MODE_REGEN;
+ val &= ~PCL_MODE_REGVAL;
+ writel(val, pcie->base + PCL_MODE);
+
+ /* use auxiliary power detection */
+ val = readl(pcie->base + PCL_APP_PM0);
+ val |= PCL_SYS_AUX_PWR_DET;
+ writel(val, pcie->base + PCL_APP_PM0);
+
+ /* assert PERST# */
+ val = readl(pcie->base + PCL_PINCTRL0);
+ val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+ | PCL_PERST_PLDN_REGVAL);
+ val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+ | PCL_PERST_PLDN_REGEN;
+ writel(val, pcie->base + PCL_PINCTRL0);
+
+ uniphier_pcie_ltssm_enable(pcie, false);
+
+ usleep_range(100000, 200000);
+
+ /* deassert PERST# */
+ val = readl(pcie->base + PCL_PINCTRL0);
+ val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+ writel(val, pcie->base + PCL_PINCTRL0);
+}
+
+static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie)
+{
+ u32 status;
+ int ret;
+
+ /* wait PIPE clock */
+ ret = readl_poll_timeout(pcie->base + PCL_PIPEMON, status,
+ status & PCL_PCLK_ALIVE, 100000, 1000000);
+ if (ret) {
+ dev_err(pcie->pci.dev,
+ "Failed to initialize controller in RC mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uniphier_pcie_link_up(struct dw_pcie *pci)
+{
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ u32 val, mask;
+
+ val = readl(pcie->base + PCL_STATUS_LINK);
+ mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;
+
+ return (val & mask) == mask;
+}
+
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(pcie, true);
+
+ return 0;
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(pcie, false);
+}
+
+static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie)
+{
+ writel(PCL_RCV_INT_ALL_ENABLE, pcie->base + PCL_RCV_INT);
+ writel(PCL_RCV_INTX_ALL_ENABLE, pcie->base + PCL_RCV_INTX);
+}
+
+
+static void uniphier_pcie_irq_mask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = readl(pcie->base + PCL_RCV_INTX);
+ val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
+ writel(val, pcie->base + PCL_RCV_INTX);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void uniphier_pcie_irq_unmask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = readl(pcie->base + PCL_RCV_INTX);
+ val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
+ writel(val, pcie->base + PCL_RCV_INTX);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static struct irq_chip uniphier_pcie_irq_chip = {
+ .name = "PCI",
+ .irq_mask = uniphier_pcie_irq_mask,
+ .irq_unmask = uniphier_pcie_irq_unmask,
+};
+
+static int uniphier_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &uniphier_pcie_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops uniphier_intx_domain_ops = {
+ .map = uniphier_pcie_intx_map,
+};
+
+static void uniphier_pcie_irq_handler(struct irq_desc *desc)
+{
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long reg;
+ u32 val, bit;
+
+ /* INT for debug */
+ val = readl(pcie->base + PCL_RCV_INT);
+
+ if (val & PCL_CFG_BW_MGT_STATUS)
+ dev_dbg(pci->dev, "Link Bandwidth Management Event\n");
+ if (val & PCL_CFG_LINK_AUTO_BW_STATUS)
+ dev_dbg(pci->dev, "Link Autonomous Bandwidth Event\n");
+ if (val & PCL_CFG_AER_RC_ERR_MSI_STATUS)
+ dev_dbg(pci->dev, "Root Error\n");
+ if (val & PCL_CFG_PME_MSI_STATUS)
+ dev_dbg(pci->dev, "PME Interrupt\n");
+
+ writel(val, pcie->base + PCL_RCV_INT);
+
+ /* INTx */
+ chained_irq_enter(chip, desc);
+
+ val = readl(pcie->base + PCL_RCV_INTX);
+ reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
+
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+
+ chained_irq_exit(chip, desc);
+}
+
+static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ struct device_node *np = pci->dev->of_node;
+ struct device_node *np_intc;
+ int ret = 0;
+
+ np_intc = of_get_child_by_name(np, "legacy-interrupt-controller");
+ if (!np_intc) {
+ dev_err(pci->dev, "Failed to get legacy-interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ pp->irq = irq_of_parse_and_map(np_intc, 0);
+ if (!pp->irq) {
+ dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n");
+ ret = -EINVAL;
+ goto out_put_node;
+ }
+
+ pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ &uniphier_intx_domain_ops, pp);
+ if (!pcie->legacy_irq_domain) {
+ dev_err(pci->dev, "Failed to get INTx domain\n");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
+ pp);
+
+out_put_node:
+ of_node_put(np_intc);
+ return ret;
+}
+
+static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ int ret;
+
+ ret = uniphier_pcie_config_legacy_irq(pp);
+ if (ret)
+ return ret;
+
+ uniphier_pcie_irq_enable(pcie);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
+ .host_init = uniphier_pcie_host_init,
+};
+
+static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie)
+{
+ int ret;
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(pcie->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ uniphier_pcie_init_rc(pcie);
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ goto out_rst_assert;
+
+ ret = uniphier_pcie_wait_rc(pcie);
+ if (ret)
+ goto out_phy_exit;
+
+ return 0;
+
+out_phy_exit:
+ phy_exit(pcie->phy);
+out_rst_assert:
+ reset_control_assert(pcie->rst);
+out_clk_disable:
+ clk_disable_unprepare(pcie->clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = uniphier_pcie_start_link,
+ .stop_link = uniphier_pcie_stop_link,
+ .link_up = uniphier_pcie_link_up,
+};
+
+static int uniphier_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pci.dev = dev;
+ pcie->pci.ops = &dw_pcie_ops;
+
+ pcie->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ pcie->rst = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(pcie->rst))
+ return PTR_ERR(pcie->rst);
+
+ pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = uniphier_pcie_host_enable(pcie);
+ if (ret)
+ return ret;
+
+ pcie->pci.pp.ops = &uniphier_pcie_host_ops;
+
+ return dw_pcie_host_init(&pcie->pci.pp);
+}
+
+static const struct of_device_id uniphier_pcie_match[] = {
+ { .compatible = "socionext,uniphier-pcie", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver uniphier_pcie_driver = {
+ .probe = uniphier_pcie_probe,
+ .driver = {
+ .name = "uniphier-pcie",
+ .of_match_table = uniphier_pcie_match,
+ },
+};
+builtin_platform_driver(uniphier_pcie_driver);
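A minimal sketch, under the same register definitions, of the INTx hwirq-to-mask-bit mapping used by uniphier_pcie_irq_mask() and uniphier_pcie_irq_unmask() above: hwirq 0..3 (INTA..INTD) maps to bits 8..11 of PCL_RCV_INTX, i.e. exactly PCL_RCV_INTX_ALL_MASK; the helper name is illustrative only:

/* Mask bit for a given INTx line (hwirq 0 == INTA, ..., 3 == INTD) */
static u32 uniphier_intx_mask_bit(irq_hw_number_t hwirq)
{
	return BIT(hwirq + PCL_RCV_INTX_MASK_SHIFT);	/* bits 8..11 */
}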
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
new file mode 100644
index 0000000000..71026fefa3
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DWC PCIe RC driver for Toshiba Visconti ARM SoC
+ *
+ * Copyright (C) 2021 Toshiba Electronic Device & Storage Corporation
+ * Copyright (C) 2021 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+#include "../../pci.h"
+
+struct visconti_pcie {
+ struct dw_pcie pci;
+ void __iomem *ulreg_base;
+ void __iomem *smu_base;
+ void __iomem *mpu_base;
+ struct clk *refclk;
+ struct clk *coreclk;
+ struct clk *auxclk;
+};
+
+#define PCIE_UL_REG_S_PCIE_MODE 0x00F4
+#define PCIE_UL_REG_S_PCIE_MODE_EP 0x00
+#define PCIE_UL_REG_S_PCIE_MODE_RC 0x04
+
+#define PCIE_UL_REG_S_PERSTN_CTRL 0x00F8
+#define PCIE_UL_IOM_PCIE_PERSTN_I_EN BIT(3)
+#define PCIE_UL_DIRECT_PERSTN_EN BIT(2)
+#define PCIE_UL_PERSTN_OUT BIT(1)
+#define PCIE_UL_DIRECT_PERSTN BIT(0)
+#define PCIE_UL_REG_S_PERSTN_CTRL_INIT (PCIE_UL_IOM_PCIE_PERSTN_I_EN | \
+ PCIE_UL_DIRECT_PERSTN_EN | \
+ PCIE_UL_DIRECT_PERSTN)
+
+#define PCIE_UL_REG_S_PHY_INIT_02 0x0104
+#define PCIE_UL_PHY0_SRAM_EXT_LD_DONE BIT(0)
+
+#define PCIE_UL_REG_S_PHY_INIT_03 0x0108
+#define PCIE_UL_PHY0_SRAM_INIT_DONE BIT(0)
+
+#define PCIE_UL_REG_S_INT_EVENT_MASK1 0x0138
+#define PCIE_UL_CFG_PME_INT BIT(0)
+#define PCIE_UL_CFG_LINK_EQ_REQ_INT BIT(1)
+#define PCIE_UL_EDMA_INT0 BIT(2)
+#define PCIE_UL_EDMA_INT1 BIT(3)
+#define PCIE_UL_EDMA_INT2 BIT(4)
+#define PCIE_UL_EDMA_INT3 BIT(5)
+#define PCIE_UL_S_INT_EVENT_MASK1_ALL (PCIE_UL_CFG_PME_INT | \
+ PCIE_UL_CFG_LINK_EQ_REQ_INT | \
+ PCIE_UL_EDMA_INT0 | \
+ PCIE_UL_EDMA_INT1 | \
+ PCIE_UL_EDMA_INT2 | \
+ PCIE_UL_EDMA_INT3)
+
+#define PCIE_UL_REG_S_SB_MON 0x0198
+#define PCIE_UL_REG_S_SIG_MON 0x019C
+#define PCIE_UL_CORE_RST_N_MON BIT(0)
+
+#define PCIE_UL_REG_V_SII_DBG_00 0x0844
+#define PCIE_UL_REG_V_SII_GEN_CTRL_01 0x0860
+#define PCIE_UL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_00 0x0864
+#define PCIE_UL_SMLH_LINK_UP BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_02 0x0868
+#define PCIE_UL_S_DETECT_ACT 0x01
+#define PCIE_UL_S_L0 0x11
+
+#define PISMU_CKON_PCIE 0x0038
+#define PISMU_CKON_PCIE_AUX_CLK BIT(1)
+#define PISMU_CKON_PCIE_MSTR_ACLK BIT(0)
+
+#define PISMU_RSOFF_PCIE 0x0538
+#define PISMU_RSOFF_PCIE_ULREG_RST_N BIT(1)
+#define PISMU_RSOFF_PCIE_PWR_UP_RST_N BIT(0)
+
+#define PCIE_MPU_REG_MP_EN 0x0
+#define MPU_MP_EN_DISABLE BIT(0)
+
+/* Access registers in PCIe ulreg */
+static void visconti_ulreg_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->ulreg_base + reg);
+}
+
+static u32 visconti_ulreg_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->ulreg_base + reg);
+}
+
+/* Access registers in PCIe smu */
+static void visconti_smu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->smu_base + reg);
+}
+
+/* Access registers in PCIe mpu */
+static void visconti_mpu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->mpu_base + reg);
+}
+
+static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->mpu_base + reg);
+}
+
+static int visconti_pcie_link_up(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
+
+ return !!(val & PCIE_UL_S_L0);
+}
+
+static int visconti_pcie_start_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val;
+ int ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_APP_LTSSM_ENABLE,
+ PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ ret = readl_relaxed_poll_timeout(addr + PCIE_UL_REG_V_PHY_ST_02,
+ val, (val & PCIE_UL_S_L0),
+ 90000, 100000);
+ if (ret)
+ return ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_S_INT_EVENT_MASK1_ALL,
+ PCIE_UL_REG_S_INT_EVENT_MASK1);
+
+ if (dw_pcie_link_up(pci)) {
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val & ~MPU_MP_EN_DISABLE,
+ PCIE_MPU_REG_MP_EN);
+ }
+
+ return 0;
+}
+
+static void visconti_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = visconti_ulreg_readl(pcie, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+ val &= ~PCIE_UL_APP_LTSSM_ENABLE;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val | MPU_MP_EN_DISABLE, PCIE_MPU_REG_MP_EN);
+}
+
+/*
+ * Per this SoC's specification, the CPU bus presents addresses to the PCIe
+ * bus as offsets from 0x40000000, so 0x40000000 is subtracted from the CPU
+ * bus address. This 0x40000000 also corresponds to io_base from the DT.
+ */
+static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
+{
+ struct dw_pcie_rp *pp = &pci->pp;
+
+ return cpu_addr & ~pp->io_base;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = visconti_pcie_cpu_addr_fixup,
+ .link_up = visconti_pcie_link_up,
+ .start_link = visconti_pcie_start_link,
+ .stop_link = visconti_pcie_stop_link,
+};
+
+static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr;
+ int err;
+ u32 val;
+
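+ /* Enable the PCIe auxiliary clock and master bus clock */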
+ visconti_smu_writel(pcie,
+ PISMU_CKON_PCIE_AUX_CLK | PISMU_CKON_PCIE_MSTR_ACLK,
+ PISMU_CKON_PCIE);
+ ndelay(250);
+
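+ /* Release the ulreg reset and select Root Complex mode */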
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_ULREG_RST_N,
+ PISMU_RSOFF_PCIE);
+ visconti_ulreg_writel(pcie, PCIE_UL_REG_S_PCIE_MODE_RC,
+ PCIE_UL_REG_S_PCIE_MODE);
+
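+ /* Sequence PERST# through the PERSTN control register */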
+ val = PCIE_UL_REG_S_PERSTN_CTRL_INIT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ val |= PCIE_UL_PERSTN_OUT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
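+ /* Release the PCIe power-up reset */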
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_PWR_UP_RST_N,
+ PISMU_RSOFF_PCIE);
+
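+ /* Wait for PHY0 SRAM initialization, then flag the external load as done */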
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_PHY_INIT_03;
+ err = readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_PHY0_SRAM_INIT_DONE),
+ 100, 1000);
+ if (err)
+ return err;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_PHY0_SRAM_EXT_LD_DONE,
+ PCIE_UL_REG_S_PHY_INIT_02);
+
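+ /* Wait until core reset deassertion is visible in the signal monitor */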
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_SIG_MON;
+ return readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_CORE_RST_N_MON), 100,
+ 1000);
+}
+
+static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
+ .host_init = visconti_pcie_host_init,
+};
+
+static int visconti_get_resources(struct platform_device *pdev,
+ struct visconti_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+
+ pcie->ulreg_base = devm_platform_ioremap_resource_byname(pdev, "ulreg");
+ if (IS_ERR(pcie->ulreg_base))
+ return PTR_ERR(pcie->ulreg_base);
+
+ pcie->smu_base = devm_platform_ioremap_resource_byname(pdev, "smu");
+ if (IS_ERR(pcie->smu_base))
+ return PTR_ERR(pcie->smu_base);
+
+ pcie->mpu_base = devm_platform_ioremap_resource_byname(pdev, "mpu");
+ if (IS_ERR(pcie->mpu_base))
+ return PTR_ERR(pcie->mpu_base);
+
+ pcie->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(pcie->refclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->refclk),
+ "Failed to get ref clock\n");
+
+ pcie->coreclk = devm_clk_get(dev, "core");
+ if (IS_ERR(pcie->coreclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->coreclk),
+ "Failed to get core clock\n");
+
+ pcie->auxclk = devm_clk_get(dev, "aux");
+ if (IS_ERR(pcie->auxclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->auxclk),
+ "Failed to get aux clock\n");
+
+ return 0;
+}
+
+static int visconti_add_pcie_port(struct visconti_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+
+ pp->irq = platform_get_irq_byname(pdev, "intr");
+ if (pp->irq < 0)
+ return pp->irq;
+
+ pp->ops = &visconti_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int visconti_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ ret = visconti_get_resources(pdev, pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return visconti_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id visconti_pcie_match[] = {
+ { .compatible = "toshiba,visconti-pcie" },
+ {},
+};
+
+static struct platform_driver visconti_pcie_driver = {
+ .probe = visconti_pcie_probe,
+ .driver = {
+ .name = "visconti-pcie",
+ .of_match_table = visconti_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(visconti_pcie_driver);