author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/pci/controller
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/pci/controller')
-rw-r--r-- drivers/pci/controller/Kconfig | 348
-rw-r--r-- drivers/pci/controller/Makefile | 64
-rw-r--r-- drivers/pci/controller/cadence/Kconfig | 68
-rw-r--r-- drivers/pci/controller/cadence/Makefile | 6
-rw-r--r-- drivers/pci/controller/cadence/pci-j721e.c | 566
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-ep.c | 743
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-host.c | 571
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-plat.c | 185
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.c | 273
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.h | 559
-rw-r--r-- drivers/pci/controller/dwc/Kconfig | 418
-rw-r--r-- drivers/pci/controller/dwc/Makefile | 49
-rw-r--r-- drivers/pci/controller/dwc/pci-dra7xx.c | 960
-rw-r--r-- drivers/pci/controller/dwc/pci-exynos.c | 443
-rw-r--r-- drivers/pci/controller/dwc/pci-imx6.c | 1612
-rw-r--r-- drivers/pci/controller/dwc/pci-keystone.c | 1338
-rw-r--r-- drivers/pci/controller/dwc/pci-layerscape-ep.c | 288
-rw-r--r-- drivers/pci/controller/dwc/pci-layerscape.c | 272
-rw-r--r-- drivers/pci/controller/dwc/pci-meson.c | 497
-rw-r--r-- drivers/pci/controller/dwc/pcie-al.c | 380
-rw-r--r-- drivers/pci/controller/dwc/pcie-armada8k.c | 349
-rw-r--r-- drivers/pci/controller/dwc/pcie-artpec6.c | 500
-rw-r--r-- drivers/pci/controller/dwc/pcie-bt1.c | 645
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-ep.c | 820
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-host.c | 880
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-plat.c | 187
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.c | 1064
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.h | 645
-rw-r--r-- drivers/pci/controller/dwc/pcie-dw-rockchip.c | 368
-rw-r--r-- drivers/pci/controller/dwc/pcie-fu740.c | 357
-rw-r--r-- drivers/pci/controller/dwc/pcie-hisi.c | 180
-rw-r--r-- drivers/pci/controller/dwc/pcie-histb.c | 450
-rw-r--r-- drivers/pci/controller/dwc/pcie-intel-gw.c | 453
-rw-r--r-- drivers/pci/controller/dwc/pcie-keembay.c | 465
-rw-r--r-- drivers/pci/controller/dwc/pcie-kirin.c | 833
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-ep.c | 916
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom.c | 1655
-rw-r--r-- drivers/pci/controller/dwc/pcie-spear13xx.c | 266
-rw-r--r-- drivers/pci/controller/dwc/pcie-tegra194-acpi.c | 109
-rw-r--r-- drivers/pci/controller/dwc/pcie-tegra194.c | 2514
-rw-r--r-- drivers/pci/controller/dwc/pcie-uniphier-ep.c | 453
-rw-r--r-- drivers/pci/controller/dwc/pcie-uniphier.c | 409
-rw-r--r-- drivers/pci/controller/dwc/pcie-visconti.c | 329
-rw-r--r-- drivers/pci/controller/mobiveil/Kconfig | 34
-rw-r--r-- drivers/pci/controller/mobiveil/Makefile | 5
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c | 255
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 590
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c | 60
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-mobiveil.c | 231
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-mobiveil.h | 225
-rw-r--r-- drivers/pci/controller/pci-aardvark.c | 2011
-rw-r--r-- drivers/pci/controller/pci-ftpci100.c | 550
-rw-r--r-- drivers/pci/controller/pci-host-common.c | 101
-rw-r--r-- drivers/pci/controller/pci-host-generic.c | 89
-rw-r--r-- drivers/pci/controller/pci-hyperv-intf.c | 67
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 4121
-rw-r--r-- drivers/pci/controller/pci-ixp4xx.c | 672
-rw-r--r-- drivers/pci/controller/pci-loongson.c | 397
-rw-r--r-- drivers/pci/controller/pci-mvebu.c | 1737
-rw-r--r-- drivers/pci/controller/pci-rcar-gen2.c | 342
-rw-r--r-- drivers/pci/controller/pci-tegra.c | 2811
-rw-r--r-- drivers/pci/controller/pci-thunder-ecam.c | 363
-rw-r--r-- drivers/pci/controller/pci-thunder-pem.c | 480
-rw-r--r-- drivers/pci/controller/pci-v3-semi.c | 907
-rw-r--r-- drivers/pci/controller/pci-versatile.c | 171
-rw-r--r-- drivers/pci/controller/pci-xgene-msi.c | 528
-rw-r--r-- drivers/pci/controller/pci-xgene.c | 649
-rw-r--r-- drivers/pci/controller/pcie-altera-msi.c | 293
-rw-r--r-- drivers/pci/controller/pcie-altera.c | 829
-rw-r--r-- drivers/pci/controller/pcie-apple.c | 842
-rw-r--r-- drivers/pci/controller/pcie-brcmstb.c | 1628
-rw-r--r-- drivers/pci/controller/pcie-hisi-error.c | 324
-rw-r--r-- drivers/pci/controller/pcie-iproc-bcma.c | 97
-rw-r--r-- drivers/pci/controller/pcie-iproc-msi.c | 681
-rw-r--r-- drivers/pci/controller/pcie-iproc-platform.c | 144
-rw-r--r-- drivers/pci/controller/pcie-iproc.c | 1598
-rw-r--r-- drivers/pci/controller/pcie-iproc.h | 131
-rw-r--r-- drivers/pci/controller/pcie-mediatek-gen3.c | 1094
-rw-r--r-- drivers/pci/controller/pcie-mediatek.c | 1255
-rw-r--r-- drivers/pci/controller/pcie-microchip-host.c | 1216
-rw-r--r-- drivers/pci/controller/pcie-mt7621.c | 552
-rw-r--r-- drivers/pci/controller/pcie-rcar-ep.c | 561
-rw-r--r-- drivers/pci/controller/pcie-rcar-host.c | 1155
-rw-r--r-- drivers/pci/controller/pcie-rcar.c | 120
-rw-r--r-- drivers/pci/controller/pcie-rcar.h | 147
-rw-r--r-- drivers/pci/controller/pcie-rockchip-ep.c | 631
-rw-r--r-- drivers/pci/controller/pcie-rockchip-host.c | 1056
-rw-r--r-- drivers/pci/controller/pcie-rockchip.c | 441
-rw-r--r-- drivers/pci/controller/pcie-rockchip.h | 343
-rw-r--r-- drivers/pci/controller/pcie-xilinx-cpm.c | 663
-rw-r--r-- drivers/pci/controller/pcie-xilinx-nwl.c | 847
-rw-r--r-- drivers/pci/controller/pcie-xilinx.c | 621
-rw-r--r-- drivers/pci/controller/vmd.c | 1135
93 files changed, 60287 insertions, 0 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
new file mode 100644
index 000000000..c0c3f2824
--- /dev/null
+++ b/drivers/pci/controller/Kconfig
@@ -0,0 +1,348 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "PCI controller drivers"
+ depends on PCI
+
+config PCI_AARDVARK
+ tristate "Aardvark PCIe controller"
+ depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI
+ select PCI_BRIDGE_EMUL
+ help
+ Add support for Aardvark 64bit PCIe Host Controller. This
+ controller is part of the South Bridge of the Marvell Armada
+ 3700 SoC.
+
+config PCIE_ALTERA
+ tristate "Altera PCIe controller"
+ depends on ARM || NIOS2 || ARM64 || COMPILE_TEST
+ help
+ Say Y here if you want to enable PCIe controller support on Altera
+ FPGA.
+
+config PCIE_ALTERA_MSI
+ tristate "Altera PCIe MSI feature"
+ depends on PCIE_ALTERA
+ depends on PCI_MSI
+ help
+ Say Y here if you want PCIe MSI support for the Altera FPGA.
+ This MSI driver supports Altera MSI to GIC controller IP.
+
+config PCIE_APPLE_MSI_DOORBELL_ADDR
+ hex
+ default 0xfffff000
+ depends on PCIE_APPLE
+
+config PCIE_APPLE
+ tristate "Apple PCIe controller"
+ depends on ARCH_APPLE || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want to enable PCIe controller support on Apple
+ system-on-chips, like the Apple M1. This is required for the USB
+ type-A ports, Ethernet, Wi-Fi, and Bluetooth.
+
+ If unsure, say Y if you have an Apple Silicon system.
+
+config PCI_VERSATILE
+ bool "ARM Versatile PB PCI controller"
+ depends on ARCH_VERSATILE || COMPILE_TEST
+
+config PCIE_BRCMSTB
+ tristate "Broadcom Brcmstb PCIe controller"
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCMBCA || \
+ BMIPS_GENERIC || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI
+ default ARCH_BRCMSTB || BMIPS_GENERIC
+ help
+ Say Y here to enable PCIe host controller support for
+ Broadcom STB based SoCs, like the Raspberry Pi 4.
+
+config PCIE_IPROC
+ tristate
+ help
+ This enables the iProc PCIe core controller support for Broadcom's
+ iProc family of SoCs. An appropriate bus interface driver needs
+ to be enabled to select this.
+
+config PCIE_IPROC_PLATFORM
+ tristate "Broadcom iProc PCIe platform bus driver"
+ depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST)
+ depends on OF
+ select PCIE_IPROC
+ default ARCH_BCM_IPROC
+ help
+ Say Y here if you want to use the Broadcom iProc PCIe controller
+ through the generic platform bus interface.
+
+config PCIE_IPROC_BCMA
+ tristate "Broadcom iProc BCMA PCIe controller"
+ depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select PCIE_IPROC
+ select BCMA
+ default ARCH_BCM_5301X
+ help
+ Say Y here if you want to use the Broadcom iProc PCIe controller
+ through the BCMA bus interface.
+
+config PCIE_IPROC_MSI
+ bool "Broadcom iProc PCIe MSI support"
+ depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
+ depends on PCI_MSI
+ default ARCH_BCM_IPROC
+ help
+ Say Y here if you want to enable MSI support for Broadcom's iProc
+ PCIe controller.
+
+config PCI_HOST_THUNDER_PEM
+ bool "Cavium Thunder PCIe controller to off-chip devices"
+ depends on ARM64 || COMPILE_TEST
+ depends on OF || (ACPI && PCI_QUIRKS)
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
+
+config PCI_HOST_THUNDER_ECAM
+ bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
+ depends on ARM64 || COMPILE_TEST
+ depends on OF || (ACPI && PCI_QUIRKS)
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
+
+config PCI_FTPCI100
+ bool "Faraday Technology FTPCI100 PCI controller"
+ depends on OF
+ default ARCH_GEMINI
+
+config PCI_HOST_COMMON
+ tristate
+ select PCI_ECAM
+
+config PCI_HOST_GENERIC
+ tristate "Generic PCI host controller"
+ depends on OF
+ select PCI_HOST_COMMON
+ select IRQ_DOMAIN
+ help
+ Say Y here if you want to support a simple generic PCI host
+ controller, such as the one emulated by kvmtool.
+
+config PCIE_HISI_ERR
+ depends on ACPI_APEI_GHES && (ARM64 || COMPILE_TEST)
+ bool "HiSilicon HIP PCIe controller error handling driver"
+ help
+ Say Y here if you want error handling support for the PCIe
+ controllers on HiSilicon HIP SoCs.
+
+config PCI_IXP4XX
+ bool "Intel IXP4xx PCI controller"
+ depends on ARM && OF
+ depends on ARCH_IXP4XX || COMPILE_TEST
+ default ARCH_IXP4XX
+ help
+ Say Y here if you want support for the PCI host controller found
+ in the Intel IXP4xx XScale-based network processor SoC.
+
+config VMD
+ depends on PCI_MSI && X86_64 && !UML
+ tristate "Intel Volume Management Device Driver"
+ help
+ Adds support for the Intel Volume Management Device (VMD). VMD is a
+ secondary PCI host bridge that allows PCI Express root ports,
+ and devices attached to them, to be removed from the default
+ PCI domain and placed within the VMD domain. This provides
+ more bus resources than are otherwise possible with a
+ single domain. If you know your system provides one of these and
+ has devices attached to it, say Y; if you are not sure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vmd.
+
+config PCI_LOONGSON
+ bool "LOONGSON PCIe controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ depends on OF || ACPI
+ depends on PCI_QUIRKS
+ default MACH_LOONGSON64
+ help
+ Say Y here if you want to enable PCI controller support on
+ Loongson systems.
+
+config PCI_MVEBU
+ tristate "Marvell EBU PCIe controller"
+ depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
+ depends on MVEBU_MBUS
+ depends on ARM
+ depends on OF
+ select PCI_BRIDGE_EMUL
+ help
+ Add support for Marvell EBU PCIe controller. This PCIe controller
+ is used on 32-bit Marvell ARM SoCs: Dove, Kirkwood, Armada 370,
+ Armada XP, Armada 375, Armada 38x and Armada 39x.
+
+config PCIE_MEDIATEK
+ tristate "MediaTek PCIe controller"
+ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI
+ help
+ Say Y here if you want to enable PCIe controller support on
+ MediaTek SoCs.
+
+config PCIE_MEDIATEK_GEN3
+ tristate "MediaTek Gen3 PCIe controller"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on PCI_MSI
+ help
+ Adds support for the PCIe Gen3 MAC controller on MediaTek SoCs.
+ This PCIe controller is compatible with Gen3, Gen2 and Gen1
+ speeds, and supports up to 256 MSI interrupt vectors for
+ multi-function devices.
+
+ Say Y here if you want to enable Gen3 PCIe controller support on
+ MediaTek SoCs.
+
+config PCIE_MT7621
+ tristate "MediaTek MT7621 PCIe controller"
+ depends on SOC_MT7621 || COMPILE_TEST
+ select PHY_MT7621_PCI
+ default SOC_MT7621
+ help
+ This selects a driver for the MediaTek MT7621 PCIe Controller.
+
+config PCIE_MICROCHIP_HOST
+ tristate "Microchip AXI PCIe controller"
+ depends on PCI_MSI && OF
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want the kernel to support the Microchip AXI PCIe
+ Host Bridge driver.
+
+config PCI_HYPERV_INTERFACE
+ tristate "Microsoft Hyper-V PCI Interface"
+ depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI
+ help
+ The Hyper-V PCI Interface is a helper driver that allows other
+ drivers to have a common interface with the Hyper-V PCI frontend
+ driver.
+
+config PCI_TEGRA
+ bool "NVIDIA Tegra PCIe controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on PCI_MSI
+ help
+ Say Y here if you want support for the PCIe host controller found
+ on NVIDIA Tegra SoCs.
+
+config PCIE_RCAR_HOST
+ bool "Renesas R-Car PCIe controller (host mode)"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_MSI
+ help
+ Say Y here if you want PCIe controller support on R-Car SoCs in host
+ mode.
+
+config PCIE_RCAR_EP
+ bool "Renesas R-Car PCIe controller (endpoint mode)"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ help
+ Say Y here if you want PCIe controller support on R-Car SoCs in
+ endpoint mode.
+
+config PCI_RCAR_GEN2
+ bool "Renesas R-Car Gen2 Internal PCI controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on ARM
+ help
+ Say Y here if you want internal PCI support on R-Car Gen2 SoC.
+ There are 3 internal PCI controllers available with a single
+ built-in EHCI/OHCI host controller present on each one.
+
+config PCIE_ROCKCHIP
+ bool
+ depends on PCI
+
+config PCIE_ROCKCHIP_HOST
+ tristate "Rockchip PCIe controller (host mode)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI
+ select MFD_SYSCON
+ select PCIE_ROCKCHIP
+ help
+ Say Y here if you want internal PCI support on Rockchip SoC.
+ There is 1 internal PCIe port available to support GEN2 with
+ 4 slots.
+
+config PCIE_ROCKCHIP_EP
+ bool "Rockchip PCIe controller (endpoint mode)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select MFD_SYSCON
+ select PCIE_ROCKCHIP
+ help
+ Say Y here if you want to support Rockchip PCIe controller in
+ endpoint mode on Rockchip SoC. There is 1 internal PCIe port
+ available to support GEN2 with 4 slots.
+
+config PCI_V3_SEMI
+ bool "V3 Semiconductor PCI controller"
+ depends on OF
+ depends on ARM || COMPILE_TEST
+ default ARCH_INTEGRATOR_AP
+
+config PCI_XGENE
+ bool "X-Gene PCIe controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on OF || (ACPI && PCI_QUIRKS)
+ help
+ Say Y here if you want internal PCI support on APM X-Gene SoC.
+ There are 5 internal PCIe ports available. Each port is GEN3 capable
+ and has a lane width ranging from x1 to x8.
+
+config PCI_XGENE_MSI
+ bool "X-Gene v1 PCIe MSI feature"
+ depends on PCI_XGENE
+ depends on PCI_MSI
+ default y
+ help
+ Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
+ This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.
+
+config PCIE_XILINX
+ bool "Xilinx AXI PCIe controller"
+ depends on OF
+ depends on PCI_MSI
+ help
+ Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
+ Host Bridge driver.
+
+config PCIE_XILINX_NWL
+ bool "Xilinx NWL PCIe controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on PCI_MSI
+ help
+ Say 'Y' here if you want kernel support for the Xilinx
+ NWL PCIe controller. The controller can act as a Root Port
+ or an Endpoint, but this option currently enables only
+ Root Port support.
+
+config PCIE_XILINX_CPM
+ bool "Xilinx Versal CPM PCI controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select PCI_HOST_COMMON
+ help
+ Say 'Y' here if you want kernel support for the
+ Xilinx Versal CPM host bridge.
+
+source "drivers/pci/controller/cadence/Kconfig"
+source "drivers/pci/controller/dwc/Kconfig"
+source "drivers/pci/controller/mobiveil/Kconfig"
+endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
new file mode 100644
index 000000000..37c8663de
--- /dev/null
+++ b/drivers/pci/controller/Makefile
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_CADENCE) += cadence/
+obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
+obj-$(CONFIG_PCI_IXP4XX) += pci-ixp4xx.o
+obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
+obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o
+obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
+obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
+obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
+obj-$(CONFIG_PCIE_RCAR_HOST) += pcie-rcar.o pcie-rcar-host.o
+obj-$(CONFIG_PCIE_RCAR_EP) += pcie-rcar.o pcie-rcar-ep.o
+obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
+obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
+obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
+obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
+obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
+obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
+obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
+obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
+obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
+obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
+obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
+obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
+obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o
+obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
+obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
+obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
+obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
+obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
+obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
+obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
+obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
+obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
+obj-$(CONFIG_VMD) += vmd.o
+obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
+obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
+obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o
+obj-$(CONFIG_PCIE_APPLE) += pcie-apple.o
+obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o
+
+# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
+obj-y += dwc/
+obj-y += mobiveil/
+
+
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+ifdef CONFIG_ACPI
+ifdef CONFIG_PCI_QUIRKS
+obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
+obj-$(CONFIG_ARM64) += pci-thunder-pem.o
+obj-$(CONFIG_ARM64) += pci-xgene.o
+endif
+endif
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
new file mode 100644
index 000000000..291d12711
--- /dev/null
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Cadence-based PCIe controllers"
+ depends on PCI
+
+config PCIE_CADENCE
+ bool
+
+config PCIE_CADENCE_HOST
+ bool
+ depends on OF
+ select IRQ_DOMAIN
+ select PCIE_CADENCE
+
+config PCIE_CADENCE_EP
+ bool
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_CADENCE
+
+config PCIE_CADENCE_PLAT
+ bool
+
+config PCIE_CADENCE_PLAT_HOST
+ bool "Cadence platform PCIe controller (host mode)"
+ depends on OF
+ select PCIE_CADENCE_HOST
+ select PCIE_CADENCE_PLAT
+ help
+ Say Y here if you want to support the Cadence PCIe platform controller in
+ host mode. This PCIe controller may be embedded into many different
+ vendors' SoCs.
+
+config PCIE_CADENCE_PLAT_EP
+ bool "Cadence platform PCIe controller (endpoint mode)"
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_CADENCE_EP
+ select PCIE_CADENCE_PLAT
+ help
+ Say Y here if you want to support the Cadence PCIe platform controller in
+ endpoint mode. This PCIe controller may be embedded into many
+ different vendors' SoCs.
+
+config PCI_J721E
+ bool
+
+config PCI_J721E_HOST
+ bool "TI J721E PCIe controller (host mode)"
+ depends on OF
+ select PCIE_CADENCE_HOST
+ select PCI_J721E
+ help
+ Say Y here if you want to support the TI J721E PCIe platform
+ controller in host mode. The TI J721E PCIe controller uses the
+ Cadence PCIe core.
+
+config PCI_J721E_EP
+ bool "TI J721E PCIe controller (endpoint mode)"
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_CADENCE_EP
+ select PCI_J721E
+ help
+ Say Y here if you want to support the TI J721E PCIe platform
+ controller in endpoint mode. The TI J721E PCIe controller uses the
+ Cadence PCIe core.
+endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
new file mode 100644
index 000000000..9bac5fb2f
--- /dev/null
+++ b/drivers/pci/controller/cadence/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
+obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
+obj-$(CONFIG_PCI_J721E) += pci-j721e.o
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
new file mode 100644
index 000000000..2c87e7728
--- /dev/null
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pci-j721e - PCIe controller driver for TI's J721E SoCs
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "../../pci.h"
+#include "pcie-cadence.h"
+
+#define ENABLE_REG_SYS_2 0x108
+#define STATUS_REG_SYS_2 0x508
+#define STATUS_CLR_REG_SYS_2 0x708
+#define LINK_DOWN BIT(1)
+#define J7200_LINK_DOWN BIT(10)
+
+#define J721E_PCIE_USER_CMD_STATUS 0x4
+#define LINK_TRAINING_ENABLE BIT(0)
+
+#define J721E_PCIE_USER_LINKSTATUS 0x14
+#define LINK_STATUS GENMASK(1, 0)
+
+enum link_status {
+ NO_RECEIVERS_DETECTED,
+ LINK_TRAINING_IN_PROGRESS,
+ LINK_UP_DL_IN_PROGRESS,
+ LINK_UP_DL_COMPLETED,
+};
+
+#define J721E_MODE_RC BIT(7)
+#define LANE_COUNT_MASK BIT(8)
+#define LANE_COUNT(n) ((n) << 8)
+
+#define GENERATION_SEL_MASK GENMASK(1, 0)
+
+#define MAX_LANES 2
+
+struct j721e_pcie {
+ struct cdns_pcie *cdns_pcie;
+ struct clk *refclk;
+ u32 mode;
+ u32 num_lanes;
+ void __iomem *user_cfg_base;
+ void __iomem *intd_cfg_base;
+ u32 linkdown_irq_regfield;
+};
+
+enum j721e_pcie_mode {
+ PCI_MODE_RC,
+ PCI_MODE_EP,
+};
+
+struct j721e_pcie_data {
+ enum j721e_pcie_mode mode;
+ unsigned int quirk_retrain_flag:1;
+ unsigned int quirk_detect_quiet_flag:1;
+ unsigned int quirk_disable_flr:1;
+ u32 linkdown_irq_regfield;
+ unsigned int byte_access_allowed:1;
+};
+
+static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
+{
+ return readl(pcie->user_cfg_base + offset);
+}
+
+static inline void j721e_pcie_user_writel(struct j721e_pcie *pcie, u32 offset,
+ u32 value)
+{
+ writel(value, pcie->user_cfg_base + offset);
+}
+
+static inline u32 j721e_pcie_intd_readl(struct j721e_pcie *pcie, u32 offset)
+{
+ return readl(pcie->intd_cfg_base + offset);
+}
+
+static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset,
+ u32 value)
+{
+ writel(value, pcie->intd_cfg_base + offset);
+}
+
+static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
+{
+ struct j721e_pcie *pcie = priv;
+ struct device *dev = pcie->cdns_pcie->dev;
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
+ if (!(reg & pcie->linkdown_irq_regfield))
+ return IRQ_NONE;
+
+ dev_err(dev, "LINK DOWN!\n");
+
+ j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield);
+ return IRQ_HANDLED;
+}
+
+static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
+{
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
+ reg |= pcie->linkdown_irq_regfield;
+ j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
+}
+
+static int j721e_pcie_start_link(struct cdns_pcie *cdns_pcie)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+ u32 reg;
+
+ reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS);
+ reg |= LINK_TRAINING_ENABLE;
+ j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg);
+
+ return 0;
+}
+
+static void j721e_pcie_stop_link(struct cdns_pcie *cdns_pcie)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+ u32 reg;
+
+ reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS);
+ reg &= ~LINK_TRAINING_ENABLE;
+ j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg);
+}
+
+static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+ u32 reg;
+
+ reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS);
+ reg &= LINK_STATUS;
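+ /* LINK_UP_DL_COMPLETED (0x3): link trained and data link layer initialization completed */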
+ if (reg == LINK_UP_DL_COMPLETED)
+ return true;
+
+ return false;
+}
+
+static const struct cdns_pcie_ops j721e_pcie_ops = {
+ .start_link = j721e_pcie_start_link,
+ .stop_link = j721e_pcie_stop_link,
+ .link_up = j721e_pcie_link_up,
+};
+
+static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
+ unsigned int offset)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ u32 mask = J721E_MODE_RC;
+ u32 mode = pcie->mode;
+ u32 val = 0;
+ int ret = 0;
+
+ if (mode == PCI_MODE_RC)
+ val = J721E_MODE_RC;
+
+ ret = regmap_update_bits(syscon, offset, mask, val);
+ if (ret)
+ dev_err(dev, "failed to set pcie mode\n");
+
+ return ret;
+}
+
+static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
+ struct regmap *syscon, unsigned int offset)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ struct device_node *np = dev->of_node;
+ int link_speed;
+ u32 val = 0;
+ int ret;
+
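+ /*
+  * Fall back to Gen2 when "max-link-speed" is absent or below 2; the
+  * GENERATION_SEL field written below is zero-based (0 = Gen1, 1 = Gen2).
+  */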
+ link_speed = of_pci_get_max_link_speed(np);
+ if (link_speed < 2)
+ link_speed = 2;
+
+ val = link_speed - 1;
+ ret = regmap_update_bits(syscon, offset, GENERATION_SEL_MASK, val);
+ if (ret)
+ dev_err(dev, "failed to set link speed\n");
+
+ return ret;
+}
+
+static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
+ struct regmap *syscon, unsigned int offset)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ u32 lanes = pcie->num_lanes;
+ u32 val = 0;
+ int ret;
+
+ val = LANE_COUNT(lanes - 1);
+ ret = regmap_update_bits(syscon, offset, LANE_COUNT_MASK, val);
+ if (ret)
+ dev_err(dev, "failed to set link count\n");
+
+ return ret;
+}
+
+static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ struct device_node *node = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
+ struct regmap *syscon;
+ int ret;
+
+ syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-pcie-ctrl");
+ if (IS_ERR(syscon)) {
+ dev_err(dev, "Unable to get ti,syscon-pcie-ctrl regmap\n");
+ return PTR_ERR(syscon);
+ }
+
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-pcie-ctrl", 1,
+ 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ ret = j721e_pcie_set_mode(pcie, syscon, offset);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set pci mode\n");
+ return ret;
+ }
+
+ ret = j721e_pcie_set_link_speed(pcie, syscon, offset);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set link speed\n");
+ return ret;
+ }
+
+ ret = j721e_pcie_set_lane_count(pcie, syscon, offset);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set num-lanes\n");
+ return ret;
+ }
+
+ return 0;
+}
+
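+/*
+ * When byte_access_allowed is not set, the root port config space only
+ * supports 32-bit accesses, so route root-bus reads and writes through
+ * the read32/write32 helpers.
+ */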
+static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (pci_is_root_bus(bus))
+ return pci_generic_config_read32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int cdns_ti_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (pci_is_root_bus(bus))
+ return pci_generic_config_write32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
+static struct pci_ops cdns_ti_pcie_host_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = cdns_ti_pcie_config_read,
+ .write = cdns_ti_pcie_config_write,
+};
+
+static const struct j721e_pcie_data j721e_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .quirk_retrain_flag = true,
+ .byte_access_allowed = false,
+ .linkdown_irq_regfield = LINK_DOWN,
+};
+
+static const struct j721e_pcie_data j721e_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = LINK_DOWN,
+};
+
+static const struct j721e_pcie_data j7200_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .quirk_detect_quiet_flag = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+};
+
+static const struct j721e_pcie_data j7200_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .quirk_detect_quiet_flag = true,
+ .quirk_disable_flr = true,
+};
+
+static const struct j721e_pcie_data am64_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+};
+
+static const struct j721e_pcie_data am64_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+};
+
+static const struct of_device_id of_j721e_pcie_match[] = {
+ {
+ .compatible = "ti,j721e-pcie-host",
+ .data = &j721e_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,j721e-pcie-ep",
+ .data = &j721e_pcie_ep_data,
+ },
+ {
+ .compatible = "ti,j7200-pcie-host",
+ .data = &j7200_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,j7200-pcie-ep",
+ .data = &j7200_pcie_ep_data,
+ },
+ {
+ .compatible = "ti,am64-pcie-host",
+ .data = &am64_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,am64-pcie-ep",
+ .data = &am64_pcie_ep_data,
+ },
+ {},
+};
+
+static int j721e_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct pci_host_bridge *bridge;
+ const struct j721e_pcie_data *data;
+ struct cdns_pcie *cdns_pcie;
+ struct j721e_pcie *pcie;
+ struct cdns_pcie_rc *rc = NULL;
+ struct cdns_pcie_ep *ep = NULL;
+ struct gpio_desc *gpiod;
+ void __iomem *base;
+ struct clk *clk;
+ u32 num_lanes;
+ u32 mode;
+ int ret;
+ int irq;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ mode = (u32)data->mode;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ switch (mode) {
+ case PCI_MODE_RC:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ if (!data->byte_access_allowed)
+ bridge->ops = &cdns_ti_pcie_host_ops;
+ rc = pci_host_bridge_priv(bridge);
+ rc->quirk_retrain_flag = data->quirk_retrain_flag;
+ rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &rc->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+ break;
+ case PCI_MODE_EP:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ return -ENODEV;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+ ep->quirk_disable_flr = data->quirk_disable_flr;
+
+ cdns_pcie = &ep->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return -EINVAL;
+ }
+
+ pcie->mode = mode;
+ pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ pcie->intd_cfg_base = base;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "user_cfg");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ pcie->user_cfg_base = base;
+
+ ret = of_property_read_u32(node, "num-lanes", &num_lanes);
+ if (ret || num_lanes > MAX_LANES)
+ num_lanes = 1;
+ pcie->num_lanes = num_lanes;
+
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
+ return -EINVAL;
+
+ irq = platform_get_irq_byname(pdev, "link_state");
+ if (irq < 0)
+ return irq;
+
+ dev_set_drvdata(dev, pcie);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
+
+ ret = j721e_pcie_ctrl_init(pcie);
+ if (ret < 0) {
+ dev_err(dev, "j721e_pcie_ctrl_init failed\n");
+ goto err_get_sync;
+ }
+
+ ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
+ "j721e-pcie-link-down-irq", pcie);
+ if (ret < 0) {
+ dev_err(dev, "failed to request link state IRQ %d\n", irq);
+ goto err_get_sync;
+ }
+
+ j721e_pcie_config_link_irq(pcie);
+
+ switch (mode) {
+ case PCI_MODE_RC:
+ gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset GPIO\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_init_phy(dev, cdns_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to init phy\n");
+ goto err_get_sync;
+ }
+
+ clk = devm_clk_get_optional(dev, "pcie_refclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "failed to get pcie_refclk\n");
+ goto err_pcie_setup;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pcie_refclk\n");
+ goto err_pcie_setup;
+ }
+ pcie->refclk = clk;
+
+ /*
+ * "Power Sequencing and Reset Signal Timings" table in
+ * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
+ * indicates PERST# should be deasserted after minimum of 100us
+ * once REFCLK is stable. The REFCLK to the connector in RC
+ * mode is selected while enabling the PHY. So deassert PERST#
+ * after 100 us.
+ */
+ if (gpiod) {
+ usleep_range(100, 200);
+ gpiod_set_value_cansleep(gpiod, 1);
+ }
+
+ ret = cdns_pcie_host_setup(rc);
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
+ goto err_pcie_setup;
+ }
+
+ break;
+ case PCI_MODE_EP:
+ ret = cdns_pcie_init_phy(dev, cdns_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to init phy\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_ep_setup(ep);
+ if (ret < 0)
+ goto err_pcie_setup;
+
+ break;
+ }
+
+ return 0;
+
+err_pcie_setup:
+ cdns_pcie_disable_phy(cdns_pcie);
+
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static void j721e_pcie_remove(struct platform_device *pdev)
+{
+ struct j721e_pcie *pcie = platform_get_drvdata(pdev);
+ struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
+ struct device *dev = &pdev->dev;
+
+ clk_disable_unprepare(pcie->refclk);
+ cdns_pcie_disable_phy(cdns_pcie);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+}
+
+static struct platform_driver j721e_pcie_driver = {
+ .probe = j721e_pcie_probe,
+ .remove_new = j721e_pcie_remove,
+ .driver = {
+ .name = "j721e-pcie",
+ .of_match_table = of_j721e_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(j721e_pcie_driver);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
new file mode 100644
index 000000000..b8b655d40
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Cadence
+// Cadence PCIe endpoint controller driver.
+// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+
+#include "pcie-cadence.h"
+
+#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
+#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
+#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
+
+static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
+{
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
+ u32 first_vf_offset, stride;
+
+ if (vfn == 0)
+ return fn;
+
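+ /*
+  * Map a virtual function to its absolute function number using the
+  * parent PF's SR-IOV First VF Offset and VF Stride registers.
+  */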
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+
+ return fn;
+}
+
+static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
+ struct pci_epf_header *hdr)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
+ struct cdns_pcie *pcie = &ep->pcie;
+ u32 reg;
+
+ if (vfn > 1) {
+ dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
+ return -EINVAL;
+ } else if (vfn == 1) {
+ reg = cap + PCI_SRIOV_VF_DID;
+ cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
+ return 0;
+ }
+
+ cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
+ cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
+ cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
+ cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
+ hdr->subclass_code | hdr->baseclass_code << 8);
+ cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
+ hdr->cache_line_size);
+ cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);
+
+ /*
+ * Vendor ID can only be modified from function 0, all other functions
+ * use the same vendor ID as function 0.
+ */
+ if (fn == 0) {
+ /* Update the vendor IDs. */
+ u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
+ CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
+ }
+
+ return 0;
+}
+
+static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie_epf *epf = &ep->epf[fn];
+ struct cdns_pcie *pcie = &ep->pcie;
+ dma_addr_t bar_phys = epf_bar->phys_addr;
+ enum pci_barno bar = epf_bar->barno;
+ int flags = epf_bar->flags;
+ u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+ u64 sz;
+
+ /* BAR size is 2^(aperture + 7) */
+ sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
+ /*
+ * roundup_pow_of_two() returns an unsigned long, which is not suited
+ * for 64bit values.
+ */
+ sz = 1ULL << fls64(sz - 1);
+ aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
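+ /* e.g. a 1 MiB BAR: sz = SZ_1M, aperture = ilog2(SZ_1M) - 7 = 13 */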
+
+ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
+ } else {
+ bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
+ bool is_64bits = sz > SZ_2G;
+
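+ /* A 64-bit BAR uses two consecutive BAR registers, so it must start on an even BAR number */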
+ if (is_64bits && (bar & 1))
+ return -EINVAL;
+
+ if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
+ epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+ if (is_64bits && is_prefetch)
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
+ else if (is_prefetch)
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
+ else if (is_64bits)
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
+ else
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
+ }
+
+ addr0 = lower_32_bits(bar_phys);
+ addr1 = upper_32_bits(bar_phys);
+
+ if (vfn == 1)
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
+ else
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
+ b = (bar < BAR_4) ? bar : bar - BAR_4;
+
+ if (vfn == 0 || vfn == 1) {
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+ cdns_pcie_writel(pcie, reg, cfg);
+ }
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
+ addr0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
+ addr1);
+
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
+ epf->epf_bar[bar] = epf_bar;
+
+ return 0;
+}
+
+static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie_epf *epf = &ep->epf[fn];
+ struct cdns_pcie *pcie = &ep->pcie;
+ enum pci_barno bar = epf_bar->barno;
+ u32 reg, cfg, b, ctrl;
+
+ if (vfn == 1)
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
+ else
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
+ b = (bar < BAR_4) ? bar : bar - BAR_4;
+
+ if (vfn == 0 || vfn == 1) {
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+ cdns_pcie_writel(pcie, reg, cfg);
+ }
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
+
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
+ epf->epf_bar[bar] = NULL;
+}
+
+static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u64 pci_addr, size_t size)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ u32 r;
+
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
+ if (r >= ep->max_regions - 1) {
+ dev_err(&epc->dev, "no free outbound region\n");
+ return -EINVAL;
+ }
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+ cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);
+
+ set_bit(r, &ep->ob_region_map);
+ ep->ob_addr[r] = addr;
+
+ return 0;
+}
+
+static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ u32 r;
+
+ for (r = 0; r < ep->max_regions - 1; r++)
+ if (ep->ob_addr[r] == addr)
+ break;
+
+ if (r == ep->max_regions - 1)
+ return;
+
+ cdns_pcie_reset_outbound_region(pcie, r);
+
+ ep->ob_addr[r] = 0;
+ clear_bit(r, &ep->ob_region_map);
+}
+
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ u16 flags;
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
+ /*
+ * Set the Multiple Message Capable bitfield into the Message Control
+ * register.
+ */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+ flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
+ flags |= PCI_MSI_FLAGS_64BIT;
+ flags &= ~PCI_MSI_FLAGS_MASKBIT;
+ cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);
+
+ return 0;
+}
+
+static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ u16 flags, mme;
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
+ /* Validate that the MSI feature is actually enabled. */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+ if (!(flags & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ /*
+ * Get the Multiple Message Enable bitfield from the Message Control
+ * register.
+ */
+ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+
+ return mme;
+}
+
+static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+ u32 val, reg;
+
+ func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);
+
+ reg = cap + PCI_MSIX_FLAGS;
+ val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
+ if (!(val & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val &= PCI_MSIX_FLAGS_QSIZE;
+
+ return val;
+}
+
+static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
+ u16 interrupts, enum pci_barno bir,
+ u32 offset)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+ u32 val, reg;
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
+ reg = cap + PCI_MSIX_FLAGS;
+ val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
+ val &= ~PCI_MSIX_FLAGS_QSIZE;
+ val |= interrupts;
+ cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
+
+ /* Set MSIX BAR and offset */
+ reg = cap + PCI_MSIX_TABLE;
+ val = offset | bir;
+ cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
+
+ /* Set PBA BAR and offset. BAR must match MSIX BAR */
+ reg = cap + PCI_MSIX_PBA;
+ val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
+
+ return 0;
+}
+
+static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
+ bool is_asserted)
+{
+ struct cdns_pcie *pcie = &ep->pcie;
+ unsigned long flags;
+ u32 offset;
+ u16 status;
+ u8 msg_code;
+
+ intx &= 3;
+
+ /* Set the outbound region if needed. */
+ if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
+ ep->irq_pci_fn != fn)) {
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
+ ep->irq_phys_addr);
+ ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
+ ep->irq_pci_fn = fn;
+ }
+
+ if (is_asserted) {
+ ep->irq_pending |= BIT(intx);
+ msg_code = MSG_CODE_ASSERT_INTA + intx;
+ } else {
+ ep->irq_pending &= ~BIT(intx);
+ msg_code = MSG_CODE_DEASSERT_INTA + intx;
+ }
+
+ spin_lock_irqsave(&ep->lock, flags);
+ status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
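+ /* Toggle PCI_STATUS_INTERRUPT only when it disagrees with the pending state */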
+ if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
+ status ^= PCI_STATUS_INTERRUPT;
+ cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
+ }
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
+ CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
+ CDNS_PCIE_MSG_NO_DATA;
+ writel(0, ep->irq_cpu_addr + offset);
+}
+
+static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 intx)
+{
+ u16 cmd;
+
+ cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ return -EINVAL;
+
+ cdns_pcie_ep_assert_intx(ep, fn, intx, true);
+ /*
+ * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
+ */
+ mdelay(1);
+ cdns_pcie_ep_assert_intx(ep, fn, intx, false);
+ return 0;
+}
+
+static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 interrupt_num)
+{
+ struct cdns_pcie *pcie = &ep->pcie;
+ u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ u16 flags, mme, data, data_mask;
+ u8 msi_count;
+ u64 pci_addr, pci_addr_mask = 0xff;
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
+ /* Check whether the MSI feature has been enabled by the PCI host. */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+ if (!(flags & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ /* Get the number of enabled MSIs */
+ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ msi_count = 1 << mme;
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ /* Compute the data value to be written. */
+ data_mask = msi_count - 1;
+ data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
+ data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
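+ /* Per the MSI spec, the low log2(msi_count) bits of Message Data select the vector */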
+
+ /* Get the PCI address to which the data is written. */
+ pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
+ pci_addr <<= 32;
+ pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
+ pci_addr &= GENMASK_ULL(63, 2);
+
+ /* Set the outbound region if needed. */
+ if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
+ ep->irq_pci_fn != fn)) {
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
+ false,
+ ep->irq_phys_addr,
+ pci_addr & ~pci_addr_mask,
+ pci_addr_mask + 1);
+ ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
+ ep->irq_pci_fn = fn;
+ }
+ writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+
+ return 0;
+}
+
+static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ struct cdns_pcie *pcie = &ep->pcie;
+ u64 pci_addr, pci_addr_mask = 0xff;
+ u16 flags, mme, data, data_mask;
+ u8 msi_count;
+ int ret;
+ int i;
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
+ /* Check whether the MSI feature has been enabled by the PCI host. */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+ if (!(flags & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ /* Get the number of enabled MSIs */
+ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ msi_count = 1 << mme;
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ /* Compute the data value to be written. */
+ data_mask = msi_count - 1;
+ data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
+ data = data & ~data_mask;
+
+ /* Get the PCI address to which the data is written. */
+ pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
+ pci_addr <<= 32;
+ pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
+ pci_addr &= GENMASK_ULL(63, 2);
+
+ for (i = 0; i < interrupt_num; i++) {
+ ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
+ pci_addr & ~pci_addr_mask,
+ entry_size);
+ if (ret)
+ return ret;
+ addr = addr + entry_size;
+ }
+
+ *msi_data = data;
+ *msi_addr_offset = pci_addr & pci_addr_mask;
+
+ return 0;
+}
+
+static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u16 interrupt_num)
+{
+ u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+ u32 tbl_offset, msg_data, reg;
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct cdns_pcie_epf *epf;
+ u64 pci_addr_mask = 0xff;
+ u64 msg_addr;
+ u16 flags;
+ u8 bir;
+
+ epf = &ep->epf[fn];
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
+ /* Check whether the MSI-X feature has been enabled by the PCI host. */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
+ if (!(flags & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+ reg = cap + PCI_MSIX_TABLE;
+ tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
+ bir = tbl_offset & PCI_MSIX_TABLE_BIR;
+ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+
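+ /* Read this vector's address/data pair from the MSI-X table, which lives in one of this function's own BARs */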
+ msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
+ msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+ msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+
+ /* Set the outbound region if needed. */
+ if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
+ ep->irq_pci_fn != fn) {
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
+ false,
+ ep->irq_phys_addr,
+ msg_addr & ~pci_addr_mask,
+ pci_addr_mask + 1);
+ ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
+ ep->irq_pci_fn = fn;
+ }
+ writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));
+
+ return 0;
+}
+
+static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct device *dev = pcie->dev;
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ if (vfn > 0) {
+ dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+ return -EINVAL;
+ }
+ return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
+
+ case PCI_EPC_IRQ_MSI:
+ return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
+
+ case PCI_EPC_IRQ_MSIX:
+ return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int cdns_pcie_ep_start(struct pci_epc *epc)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct device *dev = pcie->dev;
+ int max_epfs = sizeof(epc->function_num_map) * 8;
+ int ret, value, epf;
+
+ /*
+ * BIT(0) is hardwired to 1, hence function 0 is always enabled
+ * and can't be disabled anyway.
+ */
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
+
+ if (ep->quirk_disable_flr) {
+ for (epf = 0; epf < max_epfs; epf++) {
+ if (!(epc->function_num_map & BIT(epf)))
+ continue;
+
+ value = cdns_pcie_ep_fn_readl(pcie, epf,
+ CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
+ PCI_EXP_DEVCAP);
+ value &= ~PCI_EXP_DEVCAP_FLR;
+ cdns_pcie_ep_fn_writel(pcie, epf,
+ CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
+ PCI_EXP_DEVCAP, value);
+ }
+ }
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features cdns_pcie_epc_vf_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = 65536,
+};
+
+static const struct pci_epc_features cdns_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = 256,
+};
+
+static const struct pci_epc_features*
+cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ if (!vfunc_no)
+ return &cdns_pcie_epc_features;
+
+ return &cdns_pcie_epc_vf_features;
+}
+
+static const struct pci_epc_ops cdns_pcie_epc_ops = {
+ .write_header = cdns_pcie_ep_write_header,
+ .set_bar = cdns_pcie_ep_set_bar,
+ .clear_bar = cdns_pcie_ep_clear_bar,
+ .map_addr = cdns_pcie_ep_map_addr,
+ .unmap_addr = cdns_pcie_ep_unmap_addr,
+ .set_msi = cdns_pcie_ep_set_msi,
+ .get_msi = cdns_pcie_ep_get_msi,
+ .set_msix = cdns_pcie_ep_set_msix,
+ .get_msix = cdns_pcie_ep_get_msix,
+ .raise_irq = cdns_pcie_ep_raise_irq,
+ .map_msi_irq = cdns_pcie_ep_map_msi_irq,
+ .start = cdns_pcie_ep_start,
+ .get_features = cdns_pcie_ep_get_features,
+};
+
+
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+{
+ struct device *dev = ep->pcie.dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *np = dev->of_node;
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct cdns_pcie_epf *epf;
+ struct resource *res;
+ struct pci_epc *epc;
+ int ret;
+ int i;
+
+ pcie->is_rc = false;
+
+ pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
+ if (IS_ERR(pcie->reg_base)) {
+ dev_err(dev, "missing \"reg\"\n");
+ return PTR_ERR(pcie->reg_base);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
+ if (!res) {
+ dev_err(dev, "missing \"mem\"\n");
+ return -EINVAL;
+ }
+ pcie->mem_res = res;
+
+ ep->max_regions = CDNS_PCIE_MAX_OB;
+ of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);
+
+ ep->ob_addr = devm_kcalloc(dev,
+ ep->max_regions, sizeof(*ep->ob_addr),
+ GFP_KERNEL);
+ if (!ep->ob_addr)
+ return -ENOMEM;
+
+ /* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
+
+ epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
+ epc_set_drvdata(epc, ep);
+
+ if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
+ epc->max_functions = 1;
+
+ ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
+ GFP_KERNEL);
+ if (!ep->epf)
+ return -ENOMEM;
+
+ epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
+ sizeof(*epc->max_vfs), GFP_KERNEL);
+ if (!epc->max_vfs)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(np, "max-virtual-functions",
+ epc->max_vfs, epc->max_functions);
+ if (ret == 0) {
+ for (i = 0; i < epc->max_functions; i++) {
+ epf = &ep->epf[i];
+ if (epc->max_vfs[i] == 0)
+ continue;
+ epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
+ sizeof(*ep->epf), GFP_KERNEL);
+ if (!epf->epf)
+ return -ENOMEM;
+ }
+ }
+
+ ret = pci_epc_mem_init(epc, pcie->mem_res->start,
+ resource_size(pcie->mem_res), PAGE_SIZE);
+ if (ret < 0) {
+ dev_err(dev, "failed to initialize the memory space\n");
+ return ret;
+ }
+
+ ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+ SZ_128K);
+ if (!ep->irq_cpu_addr) {
+ dev_err(dev, "failed to reserve memory space for MSI\n");
+ ret = -ENOMEM;
+ goto free_epc_mem;
+ }
+ ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
+ /* Reserve region 0 for IRQs */
+ set_bit(0, &ep->ob_region_map);
+
+ if (ep->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
+
+ spin_lock_init(&ep->lock);
+
+ return 0;
+
+ free_epc_mem:
+ pci_epc_mem_exit(epc);
+
+ return ret;
+}
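+
+/*
+ * For illustration only, a device-tree fragment exercising the optional
+ * properties parsed by cdns_pcie_ep_setup() (all values are arbitrary
+ * examples, not a reference configuration):
+ *
+ *	pcie-ep@fd000000 {
+ *		compatible = "cdns,cdns-pcie-ep";
+ *		reg = <0x0 0xfd000000 0x0 0x01000000>,
+ *		      <0x0 0x80000000 0x0 0x40000000>;
+ *		reg-names = "reg", "mem";
+ *		cdns,max-outbound-regions = <16>;
+ *		max-functions = /bits/ 8 <2>;
+ *		max-virtual-functions = /bits/ 8 <4 0>;
+ *	};
+ */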
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
new file mode 100644
index 000000000..5b14f7ee3
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Cadence
+// Cadence PCIe host controller driver.
+// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list_sort.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-cadence.h"
+
+#define LINK_RETRAIN_TIMEOUT HZ
+
+static u64 bar_max_size[] = {
+ [RP_BAR0] = _ULL(128 * SZ_2G),
+ [RP_BAR1] = SZ_2G,
+ [RP_NO_BAR] = _BITULL(63),
+};
+
+static u8 bar_aperture_mask[] = {
+ [RP_BAR0] = 0x1F,
+ [RP_BAR1] = 0xF,
+};
+
+void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
+ struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
+ struct cdns_pcie *pcie = &rc->pcie;
+ unsigned int busn = bus->number;
+ u32 addr0, desc0;
+
+ if (pci_is_root_bus(bus)) {
+ /*
+ * Only the root port (devfn == 0) is connected to this bus.
+ * All other PCI devices are behind some bridge hence on another
+ * bus.
+ */
+ if (devfn)
+ return NULL;
+
+ return pcie->reg_base + (where & 0xfff);
+ }
+ /* Check that the link is up */
+ if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
+ return NULL;
+ /* Clear AXI link-down status */
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);
+
+ /* Update Output registers for AXI region 0. */
+ addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
+ CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
+ CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);
+
+ /* Configuration Type 0 or Type 1 access. */
+ desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
+ CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
+	/*
+	 * The bus number was already set, once and for all, in desc1 by
+	 * cdns_pcie_host_init_address_translation().
+	 */
+ if (busn == bridge->busnr + 1)
+ desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
+ else
+ desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);
+
+ return rc->cfg_base + (where & 0xfff);
+}
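+
+/*
+ * Worked example (illustrative): a config access to bus 1, devfn 0x08,
+ * offset 0x10 re-programs region 0 as
+ *
+ *	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
+ *		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(0x08) |
+ *		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(1);
+ *	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
+ *		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0) |
+ *		CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
+ *
+ * Type 0 is used because bus 1 sits directly below the root port; the
+ * access itself then goes through rc->cfg_base + 0x10.
+ */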
+
+static struct pci_ops cdns_pcie_host_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
+{
+ u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
+ unsigned long end_jiffies;
+ u16 lnk_stat;
+
+ /* Wait for link training to complete. Exit after timeout. */
+ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
+ do {
+ lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
+ if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
+ break;
+ usleep_range(0, 1000);
+ } while (time_before(jiffies, end_jiffies));
+
+ if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ int retries;
+
+ /* Check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (cdns_pcie_link_up(pcie)) {
+ dev_info(dev, "Link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int cdns_pcie_retrain(struct cdns_pcie *pcie)
+{
+ u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
+ u16 lnk_stat, lnk_ctl;
+ int ret = 0;
+
+	/*
+	 * Set the retrain bit if the current link speed is 2.5 GT/s but
+	 * the PCIe root port supports a higher speed.
+	 */
+
+ lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
+ PCI_EXP_LNKCAP));
+ if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return ret;
+
+ lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
+ if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
+ lnk_ctl = cdns_pcie_rp_readw(pcie,
+ pcie_cap_off + PCI_EXP_LNKCTL);
+ lnk_ctl |= PCI_EXP_LNKCTL_RL;
+ cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
+ lnk_ctl);
+
+ ret = cdns_pcie_host_training_complete(pcie);
+ if (ret)
+ return ret;
+
+ ret = cdns_pcie_host_wait_for_link(pcie);
+ }
+ return ret;
+}
+
+static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+	cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_PTM_CTRL_PTMRSEN);
+}
+
+static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ int ret;
+
+ ret = cdns_pcie_host_wait_for_link(pcie);
+
+	/*
+	 * Retrain the link once it is up, to work around a Gen2
+	 * training defect, if the quirk flag is set.
+	 */
+ if (!ret && rc->quirk_retrain_flag)
+ ret = cdns_pcie_retrain(pcie);
+
+ return ret;
+}
+
+static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 value, ctrl;
+ u32 id;
+
+ /*
+ * Set the root complex BAR configuration register:
+ * - disable both BAR0 and BAR1.
+ * - enable Prefetchable Memory Base and Limit registers in type 1
+ * config space (64 bits).
+ * - enable IO Base and Limit registers in type 1 config
+ * space (32 bits).
+ */
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+
+ /* Set root port configuration space */
+ if (rc->vendor_id != 0xffff) {
+ id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
+ CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
+ }
+
+ if (rc->device_id != 0xffff)
+ cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
+
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
+ cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
+
+ return 0;
+}
+
+static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
+ enum cdns_pcie_rp_bar bar,
+ u64 cpu_addr, u64 size,
+ unsigned long flags)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 addr0, addr1, aperture, value;
+
+ if (!rc->avail_ib_bar[bar])
+ return -EBUSY;
+
+ rc->avail_ib_bar[bar] = false;
+
+ aperture = ilog2(size);
+ addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);
+
+ if (bar == RP_NO_BAR)
+ return 0;
+
+ value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
+ value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
+ if (size + cpu_addr >= SZ_4G) {
+ if (!(flags & IORESOURCE_PREFETCH))
+ value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
+ value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
+ } else {
+ if (!(flags & IORESOURCE_PREFETCH))
+ value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
+ value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
+ }
+
+ value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+
+ return 0;
+}
+
+static enum cdns_pcie_rp_bar
+cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
+{
+ enum cdns_pcie_rp_bar bar, sel_bar;
+
+ sel_bar = RP_BAR_UNDEFINED;
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (!rc->avail_ib_bar[bar])
+ continue;
+
+ if (size <= bar_max_size[bar]) {
+ if (sel_bar == RP_BAR_UNDEFINED) {
+ sel_bar = bar;
+ continue;
+ }
+
+ if (bar_max_size[bar] < bar_max_size[sel_bar])
+ sel_bar = bar;
+ }
+ }
+
+ return sel_bar;
+}
+
+static enum cdns_pcie_rp_bar
+cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
+{
+ enum cdns_pcie_rp_bar bar, sel_bar;
+
+ sel_bar = RP_BAR_UNDEFINED;
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (!rc->avail_ib_bar[bar])
+ continue;
+
+ if (size >= bar_max_size[bar]) {
+ if (sel_bar == RP_BAR_UNDEFINED) {
+ sel_bar = bar;
+ continue;
+ }
+
+ if (bar_max_size[bar] > bar_max_size[sel_bar])
+ sel_bar = bar;
+ }
+ }
+
+ return sel_bar;
+}
+
+static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
+ struct resource_entry *entry)
+{
+ u64 cpu_addr, pci_addr, size, winsize;
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = pcie->dev;
+ enum cdns_pcie_rp_bar bar;
+ unsigned long flags;
+ int ret;
+
+ cpu_addr = entry->res->start;
+ pci_addr = entry->res->start - entry->offset;
+ flags = entry->res->flags;
+ size = resource_size(entry->res);
+
+ if (entry->offset) {
+ dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
+ pci_addr, cpu_addr);
+ return -EINVAL;
+ }
+
+ while (size > 0) {
+		/*
+		 * Try to find the smallest BAR whose size is greater than
+		 * or equal to the remaining resource_entry size. This fails
+		 * if every available BAR is smaller than the remaining size.
+		 * If such a BAR is found, the inbound ATU is configured for
+		 * it and we are done.
+		 */
+ bar = cdns_pcie_host_find_min_bar(rc, size);
+ if (bar != RP_BAR_UNDEFINED) {
+ ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
+ size, flags);
+ if (ret)
+ dev_err(dev, "IB BAR: %d config failed\n", bar);
+ return ret;
+ }
+
+		/*
+		 * If control reaches here, the remaining resource_entry size
+		 * cannot fit in a single BAR. So find the largest free BAR
+		 * whose size is less than or equal to the remaining size and
+		 * split the resource entry, so that part of it is mapped
+		 * through that BAR. The remainder is mapped during the next
+		 * iteration of the loop.
+		 * If no such BAR is found, there is no way this
+		 * resource_entry can fit, so error out.
+		 */
+ bar = cdns_pcie_host_find_max_bar(rc, size);
+ if (bar == RP_BAR_UNDEFINED) {
+ dev_err(dev, "No free BAR to map cpu_addr %llx\n",
+ cpu_addr);
+ return -EINVAL;
+ }
+
+ winsize = bar_max_size[bar];
+ ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
+ flags);
+ if (ret) {
+ dev_err(dev, "IB BAR: %d config failed\n", bar);
+ return ret;
+ }
+
+ size -= winsize;
+ cpu_addr += winsize;
+ }
+
+ return 0;
+}
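+
+/*
+ * Worked example (illustrative): for a 1 GB dma-range with all BARs still
+ * free, cdns_pcie_host_find_min_bar() selects RP_BAR1 (2 GB max), the
+ * smallest aperture that still fits, keeping RP_BAR0 and RP_NO_BAR free
+ * for larger ranges. Only when no free BAR can hold the remaining size
+ * does the loop fall back to the largest free BAR and split the range
+ * across several inbound mappings.
+ */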
+
+static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct resource_entry *entry1, *entry2;
+
+ entry1 = container_of(a, struct resource_entry, node);
+ entry2 = container_of(b, struct resource_entry, node);
+
+ return resource_size(entry2->res) - resource_size(entry1->res);
+}
+
+static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
+ u32 no_bar_nbits = 32;
+ int err;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ if (!bridge)
+ return -ENOMEM;
+
+ if (list_empty(&bridge->dma_ranges)) {
+ of_property_read_u32(np, "cdns,no-bar-match-nbits",
+ &no_bar_nbits);
+ err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
+ (u64)1 << no_bar_nbits, 0);
+ if (err)
+ dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
+ return err;
+ }
+
+ list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = cdns_pcie_host_bar_config(rc, entry);
+ if (err) {
+ dev_err(dev, "Fail to configure IB using dma-ranges\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
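+
+/*
+ * For illustration only, assuming three PCI address cells and two CPU
+ * address/size cells, a dma-ranges property consumed here could look like
+ *
+ *	dma-ranges = <0x02000000 0x0 0x80000000
+ *		      0x0 0x80000000
+ *		      0x0 0x80000000>;
+ *
+ * i.e. 2 GB of system RAM at CPU address 0x80000000 mapped to the
+ * identical PCI address (the code above rejects any non-zero offset).
+ * Entries are sorted by decreasing size first, so the biggest ranges
+ * grab the best-fitting BARs.
+ */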
+
+static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
+ struct resource *cfg_res = rc->cfg_res;
+ struct resource_entry *entry;
+ u64 cpu_addr = cfg_res->start;
+ u32 addr0, addr1, desc1;
+ int r, busnr = 0;
+
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (entry)
+ busnr = entry->res->start;
+
+	/*
+	 * Reserve region 0 for PCI configuration space accesses:
+	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
+	 * cdns_pci_map_bus(); the other region registers are set here once
+	 * and for all.
+	 */
+ addr1 = 0; /* Should be programmed to zero. */
+ desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
+
+ if (pcie->ops->cpu_addr_fixup)
+ cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
+
+ addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);
+
+ r = 1;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ struct resource *res = entry->res;
+ u64 pci_addr = res->start - entry->offset;
+
+ if (resource_type(res) == IORESOURCE_IO)
+ cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
+ true,
+ pci_pio_to_address(res->start),
+ pci_addr,
+ resource_size(res));
+ else
+ cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
+ false,
+ res->start,
+ pci_addr,
+ resource_size(res));
+
+ r++;
+ }
+
+ return cdns_pcie_host_map_dma_ranges(rc);
+}
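+
+/*
+ * Resulting outbound region layout (illustrative): region 0 carries
+ * configuration accesses and is retargeted on the fly by
+ * cdns_pci_map_bus(), while regions 1..N are assigned, in order, to the
+ * host bridge's I/O and memory windows.
+ */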
+
+static int cdns_pcie_host_init(struct device *dev,
+ struct cdns_pcie_rc *rc)
+{
+ int err;
+
+ err = cdns_pcie_host_init_root_port(rc);
+ if (err)
+ return err;
+
+ return cdns_pcie_host_init_address_translation(rc);
+}
+
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+{
+ struct device *dev = rc->pcie.dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ enum cdns_pcie_rp_bar bar;
+ struct cdns_pcie *pcie;
+ struct resource *res;
+ int ret;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = &rc->pcie;
+ pcie->is_rc = true;
+
+ rc->vendor_id = 0xffff;
+ of_property_read_u32(np, "vendor-id", &rc->vendor_id);
+
+ rc->device_id = 0xffff;
+ of_property_read_u32(np, "device-id", &rc->device_id);
+
+ pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
+ if (IS_ERR(pcie->reg_base)) {
+ dev_err(dev, "missing \"reg\"\n");
+ return PTR_ERR(pcie->reg_base);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(rc->cfg_base))
+ return PTR_ERR(rc->cfg_base);
+ rc->cfg_res = res;
+
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
+ cdns_pcie_host_enable_ptm_response(pcie);
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ ret = cdns_pcie_host_start_link(rc);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+ ret = cdns_pcie_host_init(dev, rc);
+ if (ret)
+ return ret;
+
+ if (!bridge->ops)
+ bridge->ops = &cdns_pcie_host_ops;
+
+ ret = pci_host_probe(bridge);
+ if (ret < 0)
+ goto err_init;
+
+ return 0;
+
+ err_init:
+ pm_runtime_put_sync(dev);
+
+ return ret;
+}
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
new file mode 100644
index 000000000..371ffc1f0
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe platform driver.
+ *
+ * Copyright (c) 2019, Cadence Design Systems
+ * Author: Tom Joseph <tjoseph@cadence.com>
+ */
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include "pcie-cadence.h"
+
+#define CDNS_PLAT_CPU_TO_BUS_ADDR 0x0FFFFFFF
+
+/**
+ * struct cdns_plat_pcie - private data for this PCIe platform driver
+ * @pcie: Cadence PCIe controller
+ * @is_rc: true if the PCIe controller operates in Root Complex mode,
+ *         false if it operates in Endpoint mode.
+ */
+struct cdns_plat_pcie {
+ struct cdns_pcie *pcie;
+ bool is_rc;
+};
+
+struct cdns_plat_pcie_of_data {
+ bool is_rc;
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[];
+
+static u64 cdns_plat_cpu_addr_fixup(struct cdns_pcie *pcie, u64 cpu_addr)
+{
+ return cpu_addr & CDNS_PLAT_CPU_TO_BUS_ADDR;
+}
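+
+/*
+ * Example (illustrative): with this mask, a CPU address such as
+ * 0x40100000 is presented to the controller as bus address 0x00100000,
+ * i.e. only the low 28 bits are passed through.
+ */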
+
+static const struct cdns_pcie_ops cdns_plat_ops = {
+ .cpu_addr_fixup = cdns_plat_cpu_addr_fixup,
+};
+
+static int cdns_plat_pcie_probe(struct platform_device *pdev)
+{
+ const struct cdns_plat_pcie_of_data *data;
+ struct cdns_plat_pcie *cdns_plat_pcie;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+ int phy_count;
+ bool is_rc;
+ int ret;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ is_rc = data->is_rc;
+
+	pr_debug("Started %s with is_rc: %d\n", __func__, is_rc);
+ cdns_plat_pcie = devm_kzalloc(dev, sizeof(*cdns_plat_pcie), GFP_KERNEL);
+ if (!cdns_plat_pcie)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cdns_plat_pcie);
+ if (is_rc) {
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_HOST))
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ rc = pci_host_bridge_priv(bridge);
+ rc->pcie.dev = dev;
+ rc->pcie.ops = &cdns_plat_ops;
+ cdns_plat_pcie->pcie = &rc->pcie;
+ cdns_plat_pcie->is_rc = is_rc;
+
+ ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_host_setup(rc);
+ if (ret)
+ goto err_init;
+ } else {
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_EP))
+ return -ENODEV;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->pcie.dev = dev;
+ ep->pcie.ops = &cdns_plat_ops;
+ cdns_plat_pcie->pcie = &ep->pcie;
+ cdns_plat_pcie->is_rc = is_rc;
+
+ ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_ep_setup(ep);
+ if (ret)
+ goto err_init;
+ }
+
+ return 0;
+
+ err_init:
+ err_get_sync:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ cdns_pcie_disable_phy(cdns_plat_pcie->pcie);
+ phy_count = cdns_plat_pcie->pcie->phy_count;
+ while (phy_count--)
+ device_link_del(cdns_plat_pcie->pcie->link[phy_count]);
+
+	return ret;
+}
+
+static void cdns_plat_pcie_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+
+ cdns_pcie_disable_phy(pcie);
+}
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_host_of_data = {
+ .is_rc = true,
+};
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_ep_of_data = {
+ .is_rc = false,
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[] = {
+ {
+ .compatible = "cdns,cdns-pcie-host",
+ .data = &cdns_plat_pcie_host_of_data,
+ },
+ {
+ .compatible = "cdns,cdns-pcie-ep",
+ .data = &cdns_plat_pcie_ep_of_data,
+ },
+ {},
+};
+
+static struct platform_driver cdns_plat_pcie_driver = {
+ .driver = {
+ .name = "cdns-pcie",
+ .of_match_table = cdns_plat_pcie_of_match,
+ .pm = &cdns_pcie_pm_ops,
+ },
+ .probe = cdns_plat_pcie_probe,
+ .shutdown = cdns_plat_pcie_shutdown,
+};
+builtin_platform_driver(cdns_plat_pcie_driver);
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
new file mode 100644
index 000000000..4251fac5e
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Cadence
+// Cadence PCIe controller driver.
+// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+
+#include "pcie-cadence.h"
+
+void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
+{
+ u32 delay = 0x3;
+ u32 ltssm_control_cap;
+
+ /*
+ * Set the LTSSM Detect Quiet state min. delay to 2ms.
+ */
+ ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
+ ltssm_control_cap = ((ltssm_control_cap &
+ ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
+ CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
+}
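+
+/*
+ * Note: the value 0x3 lands in the two-bit field at bits [2:1] via the
+ * CDNS_PCIE_DETECT_QUIET_MIN_DELAY() helper; per the comment above it
+ * selects the 2 ms minimum delay.
+ */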
+
+void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ u32 r, bool is_io,
+ u64 cpu_addr, u64 pci_addr, size_t size)
+{
+	/*
+	 * roundup_pow_of_two() returns an unsigned long, which is not
+	 * suited to 64-bit values, hence this fls64()-based equivalent.
+	 */
+ u64 sz = 1ULL << fls64(size - 1);
+ int nbits = ilog2(sz);
+ u32 addr0, addr1, desc0, desc1;
+
+ if (nbits < 8)
+ nbits = 8;
+
+ /* Set the PCI address */
+ addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
+ (lower_32_bits(pci_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(pci_addr);
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);
+
+ /* Set the PCIe header descriptor */
+ if (is_io)
+ desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
+ else
+ desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
+ desc1 = 0;
+
+	/*
+	 * Whether or not Bit [23] is set inside the DESC0 register of the
+	 * outbound PCIe descriptor, the PCI function number must be set into
+	 * Bits [26:24] of DESC0 anyway.
+	 *
+	 * In Root Complex mode, the function number is always 0 but in
+	 * Endpoint mode, the PCIe controller may support more than one
+	 * function. This function number needs to be set properly into the
+	 * outbound PCIe descriptor.
+	 *
+	 * Besides, setting Bit [23] is mandatory when in Root Complex mode:
+	 * the driver must then provide the bus number in Bits [7:0] of DESC1
+	 * and the device number in Bits [31:27] of DESC0. Like the function
+	 * number, the device number is always 0 in Root Complex mode.
+	 *
+	 * However, when in Endpoint mode, we can clear Bit [23] of DESC0, in
+	 * which case the PCIe controller uses the captured values for the
+	 * bus and device numbers.
+	 */
+ if (pcie->is_rc) {
+ /* The device and function numbers are always 0. */
+ desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
+ CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
+ desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
+ } else {
+ /*
+ * Use captured values for bus and device numbers but still
+ * need to set the function number.
+ */
+ desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
+ }
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
+
+ /* Set the CPU address */
+ if (pcie->ops->cpu_addr_fixup)
+ cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
+
+ addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
+}
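+
+/*
+ * Worked example (illustrative): mapping a 1 MB memory (non-I/O) window
+ * through region r = 1 in RC mode on bus 0 computes
+ *
+ *	nbits = 20;	(fls64(SZ_1M - 1), i.e. a 1 MB aperture)
+ *	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(20) |
+ *		(lower_32_bits(pci_addr) & GENMASK(31, 8));
+ *	desc0 = TYPE_MEM | HARDCODED_RID | DEVFN(0);
+ *	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(0);
+ *
+ * with the CPU-side CPU_ADDR0/1 registers programmed symmetrically from
+ * cpu_addr.
+ */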
+
+void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
+ u8 busnr, u8 fn,
+ u32 r, u64 cpu_addr)
+{
+ u32 addr0, addr1, desc0, desc1;
+
+ desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
+ desc1 = 0;
+
+ /* See cdns_pcie_set_outbound_region() comments above. */
+ if (pcie->is_rc) {
+ desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
+ CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
+ desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
+ } else {
+ desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
+ }
+
+ /* Set the CPU address */
+ if (pcie->ops->cpu_addr_fixup)
+ cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
+
+ addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
+}
+
+void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
+{
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
+}
+
+void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
+{
+ int i = pcie->phy_count;
+
+ while (i--) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+}
+
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pcie->phy_count; i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret < 0) {
+ phy_exit(pcie->phy[i]);
+ goto err_phy;
+ }
+ }
+
+ return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+
+ return ret;
+}
+
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
+{
+ struct device_node *np = dev->of_node;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
+ int i;
+ int ret;
+ const char *name;
+
+ phy_count = of_property_count_strings(np, "phy-names");
+ if (phy_count < 1) {
+ dev_err(dev, "no phy-names. PHY will not be initialized\n");
+ pcie->phy_count = 0;
+ return 0;
+ }
+
+ phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ for (i = 0; i < phy_count; i++) {
+ of_property_read_string_index(np, "phy-names", i, &name);
+ phy[i] = devm_phy_get(dev, name);
+ if (IS_ERR(phy[i])) {
+ ret = PTR_ERR(phy[i]);
+ goto err_phy;
+ }
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ devm_phy_put(dev, phy[i]);
+ ret = -EINVAL;
+ goto err_phy;
+ }
+ }
+
+ pcie->phy_count = phy_count;
+ pcie->phy = phy;
+ pcie->link = link;
+
+ ret = cdns_pcie_enable_phy(pcie);
+ if (ret)
+ goto err_phy;
+
+ return 0;
+
+err_phy:
+ while (--i >= 0) {
+ device_link_del(link[i]);
+ devm_phy_put(dev, phy[i]);
+ }
+
+ return ret;
+}
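+
+/*
+ * For illustration only, the lookup above expects device-tree properties
+ * of the form (names and count are arbitrary examples):
+ *
+ *	phys = <&pcie_phy0>, <&pcie_phy1>;
+ *	phy-names = "pcie-lane0", "pcie-lane1";
+ *
+ * The stateless device link created per PHY makes each PHY provider
+ * suspend only after this controller, its consumer, matching the manual
+ * ordering in cdns_pcie_enable_phy()/cdns_pcie_disable_phy().
+ */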
+
+static int cdns_pcie_suspend_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+
+ cdns_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int cdns_pcie_resume_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = cdns_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct dev_pm_ops cdns_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
+ cdns_pcie_resume_noirq)
+};
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
new file mode 100644
index 000000000..373cb50fc
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -0,0 +1,559 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2017 Cadence
+// Cadence PCIe controller driver.
+// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+
+#ifndef _PCIE_CADENCE_H
+#define _PCIE_CADENCE_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-epf.h>
+#include <linux/phy/phy.h>
+
+/* Parameters for the wait-for-link-up polling loop */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+/*
+ * Local Management Registers
+ */
+#define CDNS_PCIE_LM_BASE 0x00100000
+
+/* Vendor ID Register */
+#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
+#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
+#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
+#define CDNS_PCIE_LM_ID_VENDOR(vid) \
+ (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
+#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
+#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
+#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
+ (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
+
+/* Root Port Requester ID Register */
+#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
+#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
+#define CDNS_PCIE_LM_RP_RID_SHIFT 0
+#define CDNS_PCIE_LM_RP_RID_(rid) \
+ (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
+
+/* Endpoint Bus and Device Number Register */
+#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c)
+#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
+#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
+#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
+#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
+
+/* Endpoint Function f BAR b Configuration Registers */
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+ (GENMASK(4, 0) << ((b) * 8))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+ (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+ (GENMASK(7, 5) << ((b) * 8))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+ (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+/* Endpoint Function Configuration Register */
+#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0)
+
+/* Root Complex BAR Configuration Register */
+#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
+ (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
+ (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
+ (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
+ (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
+#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)
+
+/* BAR control values applicable to both Endpoint Function and Root Complex */
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
+
+#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
+ (((aperture) - 2) << ((bar) * 8))
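+
+/*
+ * Worked example (illustrative): for a 1 MB inbound window,
+ * ilog2(SZ_1M) = 20, so LM_RC_BAR_CFG_APERTURE() programs the aperture
+ * field of that BAR with 20 - 2 = 18.
+ */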
+
+/* PTM Control Register */
+#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0da8)
+#define CDNS_PCIE_LM_PTM_CTRL_PTMRSEN	BIT(17)
+
+/*
+ * Endpoint Function Registers (PCI configuration space for endpoint functions)
+ */
+#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
+
+#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
+#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
+#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xc0
+#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
+
+/*
+ * Root Port Registers (PCI configuration space for the root port function)
+ */
+#define CDNS_PCIE_RP_BASE 0x00200000
+#define CDNS_PCIE_RP_CAP_OFFSET 0xc0
+
+/*
+ * Address Translation Registers
+ */
+#define CDNS_PCIE_AT_BASE 0x00400000
+
+/* Region r Outbound AXI to PCIe Address Translation Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+
+/* Region r Outbound AXI to PCIe Address Translation Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+ (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
+
+/* Region r Outbound PCIe Descriptor Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd
+/* Bit 23 MUST be set in RC mode. */
+#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+ (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+
+/* Region r Outbound PCIe Descriptor Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
+ (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
+#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
+ ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
+
+/* Region r AXI Region Base Address Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
+
+/* Region r AXI Region Base Address Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+ (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
+
+/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
+ (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
+ (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
+
+/* AXI link down register */
+#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+
+/* LTSSM Capabilities register */
+#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
+ (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
+ CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
+
+enum cdns_pcie_rp_bar {
+ RP_BAR_UNDEFINED = -1,
+ RP_BAR0,
+ RP_BAR1,
+ RP_NO_BAR
+};
+
+#define CDNS_PCIE_RP_MAX_IB 0x3
+#define CDNS_PCIE_MAX_OB 32
+
+struct cdns_pcie_rp_ib_bar {
+ u64 size;
+ bool free;
+};
+
+/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+ (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+ (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+
+/* Normal/Vendor specific message access: offset inside some outbound region */
+#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
+#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
+ (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
+#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
+#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
+ (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
+#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+
+struct cdns_pcie;
+
+enum cdns_pcie_msg_code {
+ MSG_CODE_ASSERT_INTA = 0x20,
+ MSG_CODE_ASSERT_INTB = 0x21,
+ MSG_CODE_ASSERT_INTC = 0x22,
+ MSG_CODE_ASSERT_INTD = 0x23,
+ MSG_CODE_DEASSERT_INTA = 0x24,
+ MSG_CODE_DEASSERT_INTB = 0x25,
+ MSG_CODE_DEASSERT_INTC = 0x26,
+ MSG_CODE_DEASSERT_INTD = 0x27,
+};
+
+enum cdns_pcie_msg_routing {
+ /* Route to Root Complex */
+ MSG_ROUTING_TO_RC,
+
+ /* Use Address Routing */
+ MSG_ROUTING_BY_ADDR,
+
+ /* Use ID Routing */
+ MSG_ROUTING_BY_ID,
+
+ /* Route as Broadcast Message from Root Complex */
+ MSG_ROUTING_BCAST,
+
+ /* Local message; terminate at receiver (INTx messages) */
+ MSG_ROUTING_LOCAL,
+
+ /* Gather & route to Root Complex (PME_TO_Ack message) */
+ MSG_ROUTING_GATHER,
+};
+
+struct cdns_pcie_ops {
+ int (*start_link)(struct cdns_pcie *pcie);
+ void (*stop_link)(struct cdns_pcie *pcie);
+ bool (*link_up)(struct cdns_pcie *pcie);
+ u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
+};
+
+/**
+ * struct cdns_pcie - private data for Cadence PCIe controller drivers
+ * @reg_base: IO mapped register base
+ * @mem_res: start/end offsets in the physical system memory to map PCI accesses
+ * @dev: PCIe controller
+ * @is_rc: tells whether the PCIe controller mode is Root Complex or Endpoint.
+ * @phy_count: number of supported PHY devices
+ * @phy: list of pointers to specific PHY control blocks
+ * @link: list of pointers to corresponding device link representations
+ * @ops: Platform-specific ops to control various inputs from Cadence PCIe
+ * wrapper
+ */
+struct cdns_pcie {
+ void __iomem *reg_base;
+ struct resource *mem_res;
+ struct device *dev;
+ bool is_rc;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
+ const struct cdns_pcie_ops *ops;
+};
+
+/**
+ * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
+ * @pcie: Cadence PCIe controller
+ * @cfg_res: start/end offsets in the physical system memory to map PCI
+ * configuration space accesses
+ * @cfg_base: IO mapped window to access the PCI configuration space of a
+ * single function at a time
+ * @vendor_id: PCI vendor ID
+ * @device_id: PCI device ID
+ * @avail_ib_bar: status of RP_BAR0, RP_BAR1 and RP_NO_BAR: true while the
+ *                BAR is still free (available for an inbound mapping)
+ * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
+ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ */
+struct cdns_pcie_rc {
+ struct cdns_pcie pcie;
+ struct resource *cfg_res;
+ void __iomem *cfg_base;
+ u32 vendor_id;
+ u32 device_id;
+ bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
+ unsigned int quirk_retrain_flag:1;
+ unsigned int quirk_detect_quiet_flag:1;
+};
+
+/**
+ * struct cdns_pcie_epf - Structure to hold info about endpoint function
+ * @epf: Info about virtual functions attached to the physical function
+ * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
+ */
+struct cdns_pcie_epf {
+ struct cdns_pcie_epf *epf;
+ struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
+};
+
+/**
+ * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
+ * @pcie: Cadence PCIe controller
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ *		   dedicated outbound region is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ * the sending of a memory write (MSI) / normal message (legacy
+ * IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ * the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ * @lock: spin lock to disable interrupts while modifying PCIe controller
+ *	  register fields (RMW) accessible by both remote RC and EP, to
+ *	  minimize time between read and write
+ * @epf: Structure to hold info about endpoint function
+ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
+ */
+struct cdns_pcie_ep {
+ struct cdns_pcie pcie;
+ u32 max_regions;
+ unsigned long ob_region_map;
+ phys_addr_t *ob_addr;
+ phys_addr_t irq_phys_addr;
+ void __iomem *irq_cpu_addr;
+ u64 irq_pci_addr;
+ u8 irq_pci_fn;
+ u8 irq_pending;
+ /* protect writing to PCI_STATUS while raising legacy interrupts */
+ spinlock_t lock;
+ struct cdns_pcie_epf *epf;
+ unsigned int quirk_detect_quiet_flag:1;
+ unsigned int quirk_disable_flr:1;
+};
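+
+/*
+ * Illustrative sketch (assumed usage, derived from the field descriptions
+ * above): once the dedicated outbound region is mapped to the host's MSI
+ * doorbell, raising an MSI boils down to a single store,
+ *
+ *	writel(msg_data, ep->irq_cpu_addr + offset);
+ *
+ * which the controller turns into a Memory Write TLP to irq_pci_addr.
+ */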
+
+/* Register access */
+static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
+{
+ writel(value, pcie->reg_base + reg);
+}
+
+static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
+{
+ return readl(pcie->reg_base + reg);
+}
+
+static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
+{
+ void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
+ unsigned int offset = (unsigned long)addr & 0x3;
+ u32 val = readl(aligned_addr);
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ pr_warn("Address %p and size %d are not aligned\n", addr, size);
+ return 0;
+ }
+
+ if (size > 2)
+ return val;
+
+ return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
+}
+
+static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
+{
+ void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
+ unsigned int offset = (unsigned long)addr & 0x3;
+ u32 mask;
+ u32 val;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ pr_warn("Address %p and size %d are not aligned\n", addr, size);
+ return;
+ }
+
+ if (size > 2) {
+ writel(value, addr);
+ return;
+ }
+
+ mask = ~(((1 << (size * 8)) - 1) << (offset * 8));
+ val = readl(aligned_addr) & mask;
+ val |= value << (offset * 8);
+ writel(val, aligned_addr);
+}
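+
+/*
+ * Worked example (illustrative): a 16-bit write of 0xabcd to an address
+ * whose low two bits are 0x2 gives offset = 2 and
+ * mask = ~(0xffff << 16), so the aligned 32-bit word is read back, its
+ * upper half replaced with 0xabcd, and rewritten: neighbouring config
+ * fields sharing the same dword are preserved.
+ */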
+
+/* Root Port register access */
+static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
+ u32 reg, u8 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x1, value);
+}
+
+static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
+ u32 reg, u16 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x2, value);
+}
+
+static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+ return cdns_pcie_read_sz(addr, 0x2);
+}
+
+/* Endpoint Function register access */
+static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
+ u32 reg, u8 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
+
+ cdns_pcie_write_sz(addr, 0x1, value);
+}
+
+static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
+ u32 reg, u16 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
+
+ cdns_pcie_write_sz(addr, 0x2, value);
+}
+
+static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
+ u32 reg, u32 value)
+{
+ writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
+
+ return cdns_pcie_read_sz(addr, 0x2);
+}
+
+static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
+{
+ return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
+{
+ if (pcie->ops->start_link)
+ return pcie->ops->start_link(pcie);
+
+ return 0;
+}
+
+static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
+{
+ if (pcie->ops->stop_link)
+ pcie->ops->stop_link(pcie);
+}
+
+static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
+{
+ if (pcie->ops->link_up)
+ return pcie->ops->link_up(pcie);
+
+ return true;
+}
+
+#ifdef CONFIG_PCIE_CADENCE_HOST
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+#else
+static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_PCIE_CADENCE_EP
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+#else
+static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+{
+ return 0;
+}
+#endif
+
+void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
+
+void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ u32 r, bool is_io,
+ u64 cpu_addr, u64 pci_addr, size_t size);
+
+void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
+ u8 busnr, u8 fn,
+ u32 r, u64 cpu_addr);
+
+void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
+void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
+extern const struct dev_pm_ops cdns_pcie_pm_ops;
+
+#endif /* _PCIE_CADENCE_H */
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
new file mode 100644
index 000000000..ab96da43e
--- /dev/null
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -0,0 +1,418 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "DesignWare-based PCIe controllers"
+ depends on PCI
+
+config PCIE_DW
+ bool
+
+config PCIE_DW_HOST
+ bool
+ select PCIE_DW
+
+config PCIE_DW_EP
+ bool
+ select PCIE_DW
+
+config PCIE_AL
+ bool "Amazon Annapurna Labs PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_ECAM
+ help
+ Say Y here to enable support of the Amazon's Annapurna Labs PCIe
+ controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
+ core plus Annapurna Labs proprietary hardware wrappers. This is
+ required only for DT-based platforms. ACPI platforms with the
+ Annapurna Labs PCIe controller don't need to enable this.
+
+config PCI_MESON
+ tristate "Amlogic Meson PCIe controller"
+ default m if ARCH_MESON
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCI controller support on Amlogic
+ SoCs. The PCI controller on Amlogic is based on DesignWare hardware
+ and therefore the driver re-uses the DesignWare core functions to
+ implement the driver.
+
+config PCIE_ARTPEC6
+ bool
+
+config PCIE_ARTPEC6_HOST
+ bool "Axis ARTPEC-6 PCIe controller (host mode)"
+ depends on MACH_ARTPEC6 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCIE_ARTPEC6
+ help
+ Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+ host mode. This uses the DesignWare core.
+
+config PCIE_ARTPEC6_EP
+ bool "Axis ARTPEC-6 PCIe controller (endpoint mode)"
+ depends on MACH_ARTPEC6 || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_ARTPEC6
+ help
+ Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+ endpoint mode. This uses the DesignWare core.
+
+config PCIE_BT1
+ tristate "Baikal-T1 PCIe controller"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Enables support for the PCIe controller in the Baikal-T1 SoC to work
+ in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core.
+
+config PCI_IMX6
+ bool
+
+config PCI_IMX6_HOST
+ bool "Freescale i.MX6/7/8 PCIe controller (host mode)"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_IMX6
+ help
+ Enables support for the PCIe controller in the i.MX SoCs to
+ work in Root Complex mode. The PCI controller on i.MX is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_IMX6_EP
+ bool "Freescale i.MX6/7/8 PCIe controller (endpoint mode)"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_IMX6
+ help
+ Enables support for the PCIe controller in the i.MX SoCs to
+ work in endpoint mode. The PCI controller on i.MX is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_LAYERSCAPE
+ bool "Freescale Layerscape PCIe controller (host mode)"
+ depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select MFD_SYSCON
+ help
+ Say Y here if you want to enable PCIe controller support on Layerscape
+ SoCs to work in Host mode.
+ This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
+ determines which PCIe controller works in EP mode and which PCIe
+ controller works in RC mode.
+
+config PCI_LAYERSCAPE_EP
+ bool "Freescale Layerscape PCIe controller (endpoint mode)"
+ depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here if you want to enable PCIe controller support on Layerscape
+ SoCs to work in Endpoint mode.
+ This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
+ determines which PCIe controller works in EP mode and which PCIe
+ controller works in RC mode.
+
+config PCI_HISI
+ depends on OF && (ARM64 || COMPILE_TEST)
+ bool "HiSilicon Hip05 and Hip06 SoCs PCIe controller"
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want PCIe controller support on HiSilicon
+ Hip05 and Hip06 SoCs
+
+config PCIE_KIRIN
+ depends on OF && (ARM64 || COMPILE_TEST)
+ tristate "HiSilicon Kirin PCIe controller"
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select REGMAP_MMIO
+ help
+ Say Y here if you want PCIe controller support
+ on HiSilicon Kirin series SoCs.
+
+config PCIE_HISI_STB
+ bool "HiSilicon STB PCIe controller"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+
+config PCIE_INTEL_GW
+	bool "Intel Gateway PCIe controller"
+ depends on OF && (X86 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say 'Y' here to enable PCIe Host controller support on Intel
+ Gateway SoCs.
+ The PCIe controller uses the DesignWare core plus Intel-specific
+ hardware wrappers.
+
+config PCIE_KEEMBAY
+ bool
+
+config PCIE_KEEMBAY_HOST
+ bool "Intel Keem Bay PCIe controller (host mode)"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in host mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
+config PCIE_KEEMBAY_EP
+ bool "Intel Keem Bay PCIe controller (endpoint mode)"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI_MSI
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in endpoint mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
+config PCIE_ARMADA_8K
+ bool "Marvell Armada-8K PCIe controller"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCIe controller support on
+ Armada-8K SoCs. The PCIe controller on Armada-8K is based on
+ DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCIE_TEGRA194
+ tristate
+
+config PCIE_TEGRA194_HOST
+ tristate "NVIDIA Tegra194 (and later) PCIe controller (host mode)"
+ depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
+ help
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in host mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_TEGRA194_EP
+ tristate "NVIDIA Tegra194 (and later) PCIe controller (endpoint mode)"
+ depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
+ help
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in endpoint mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_DW_PLAT
+ bool
+
+config PCIE_DW_PLAT_HOST
+ bool "Platform bus based DesignWare PCIe controller (host mode)"
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCIE_DW_PLAT
+ help
+	  Enables support for the PCIe controller in the DesignWare IP to
+	  work in host mode.
+	  This controller can work either as EP or RC. In order to enable
+	  host-specific features PCIE_DW_PLAT_HOST must be selected and in
+	  order to enable device-specific features PCIE_DW_PLAT_EP must be
+	  selected.
+
+config PCIE_DW_PLAT_EP
+ bool "Platform bus based DesignWare PCIe controller (endpoint mode)"
+ depends on PCI && PCI_MSI
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_DW_PLAT
+ help
+	  Enables support for the PCIe controller in the DesignWare IP to
+	  work in endpoint mode.
+	  This controller can work either as EP or RC. In order to enable
+	  host-specific features PCIE_DW_PLAT_HOST must be selected and in
+	  order to enable device-specific features PCIE_DW_PLAT_EP must be
+	  selected.
+
+config PCIE_QCOM
+ bool "Qualcomm PCIe controller (host mode)"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select CRC8
+ help
+ Say Y here to enable PCIe controller support on Qualcomm SoCs. The
+ PCIe controller uses the DesignWare core plus Qualcomm-specific
+ hardware wrappers.
+
+config PCIE_QCOM_EP
+ tristate "Qualcomm PCIe controller (endpoint mode)"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
+ to work in endpoint mode. The PCIe controller uses the DesignWare core
+ plus Qualcomm-specific hardware wrappers.
+
+config PCIE_ROCKCHIP_DW_HOST
+ bool "Rockchip DesignWare PCIe controller"
+ select PCIE_DW
+ select PCIE_DW_HOST
+ depends on PCI_MSI
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ help
+ Enables support for the DesignWare PCIe controller in Rockchip
+ SoCs other than the RK3399.
+
+config PCI_EXYNOS
+ tristate "Samsung Exynos PCIe controller"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Enables support for the PCIe controller in Samsung Exynos SoCs to
+ work in host mode. The PCIe controller is based on DesignWare
+ hardware, so the driver reuses the DesignWare core functions.
+
+config PCIE_FU740
+ bool "SiFive FU740 PCIe controller"
+ depends on PCI_MSI
+ depends on SOC_SIFIVE || COMPILE_TEST
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support for the SiFive
+ FU740.
+
+config PCIE_UNIPHIER
+ bool "Socionext UniPhier PCIe controller (host mode)"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe host controller support on UniPhier SoCs.
+ This driver supports LD20 and PXs3 SoCs.
+
+config PCIE_UNIPHIER_EP
+ bool "Socionext UniPhier PCIe controller (endpoint mode)"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here if you want PCIe endpoint controller support on
+ UniPhier SoCs. This driver supports the Pro5 SoC.
+
+config PCIE_SPEAR13XX
+ bool "STMicroelectronics SPEAr PCIe controller"
+ depends on ARCH_SPEAR13XX || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe support on SPEAr13XX SoCs.
+
+config PCI_DRA7XX
+ tristate
+
+config PCI_DRA7XX_HOST
+ tristate "TI DRA7xx PCIe controller (host mode)"
+ depends on SOC_DRA7XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_DRA7XX
+ default y if SOC_DRA7XX
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ host mode. There are two instances of the PCIe controller in DRA7xx,
+ and each can work as either EP or RC. Select PCI_DRA7XX_HOST to
+ enable host-specific features and PCI_DRA7XX_EP to enable
+ endpoint-specific features. This driver uses the DesignWare core.
+
+config PCI_DRA7XX_EP
+ tristate "TI DRA7xx PCIe controller (endpoint mode)"
+ depends on SOC_DRA7XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_DRA7XX
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ endpoint mode. There are two instances of the PCIe controller in
+ DRA7xx, and each can work as either EP or RC. Select PCI_DRA7XX_HOST
+ to enable host-specific features and PCI_DRA7XX_EP to enable
+ endpoint-specific features. This driver uses the DesignWare core.
+
+config PCI_KEYSTONE
+ bool
+
+config PCI_KEYSTONE_HOST
+ bool "TI Keystone PCIe controller (host mode)"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_KEYSTONE
+ help
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in host mode. The PCIe controller on Keystone is based on
+ DesignWare hardware, so the driver reuses the DesignWare core
+ functions.
+
+config PCI_KEYSTONE_EP
+ bool "TI Keystone PCIe controller (endpoint mode)"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_KEYSTONE
+ help
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in endpoint mode. The PCIe controller on Keystone is based
+ on DesignWare hardware, so the driver reuses the DesignWare core
+ functions.
+
+config PCIE_VISCONTI_HOST
+ bool "Toshiba Visconti PCIe controller"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on the Toshiba
+ Visconti SoC. This driver supports the TMPV7708 SoC.
+
+endmenu
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
new file mode 100644
index 000000000..bf5c31187
--- /dev/null
+++ b/drivers/pci/controller/dwc/Makefile
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
+obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
+obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
+obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
+obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
+obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
+obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
+obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
+obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
+obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
+obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
+obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
+obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
+
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+obj-$(CONFIG_PCIE_AL) += pcie-al.o
+obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
+
+ifdef CONFIG_ACPI
+ifdef CONFIG_PCI_QUIRKS
+obj-$(CONFIG_ARM64) += pcie-al.o
+obj-$(CONFIG_ARM64) += pcie-hisi.o
+obj-$(CONFIG_ARM64) += pcie-tegra194-acpi.o
+endif
+endif
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
new file mode 100644
index 000000000..b445ffe95
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -0,0 +1,960 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pci-dra7xx - PCIe controller driver for TI DRA7xx SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* PCIe controller wrapper DRA7XX configuration registers */
+
+#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024
+#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028
+#define ERR_SYS BIT(0)
+#define ERR_FATAL BIT(1)
+#define ERR_NONFATAL BIT(2)
+#define ERR_COR BIT(3)
+#define ERR_AXI BIT(4)
+#define ERR_ECRC BIT(5)
+#define PME_TURN_OFF BIT(8)
+#define PME_TO_ACK BIT(9)
+#define PM_PME BIT(10)
+#define LINK_REQ_RST BIT(11)
+#define LINK_UP_EVT BIT(12)
+#define CFG_BME_EVT BIT(13)
+#define CFG_MSE_EVT BIT(14)
+#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
+ ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
+ LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
+
+#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034
+#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038
+#define INTA BIT(0)
+#define INTB BIT(1)
+#define INTC BIT(2)
+#define INTD BIT(3)
+#define MSI BIT(4)
+#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+
+#define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100
+#define DEVICE_TYPE_EP 0x0
+#define DEVICE_TYPE_LEG_EP 0x1
+#define DEVICE_TYPE_RC 0x4
+
+#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104
+#define LTSSM_EN 0x1
+
+#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C
+#define LINK_UP BIT(16)
+#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
+
+#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
+#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128
+
+#define PCIECTRL_TI_CONF_MSI_XMT 0x012c
+#define MSI_REQ_GRANT BIT(0)
+#define MSI_VECTOR_SHIFT 7
+
+#define PCIE_1LANE_2LANE_SELECTION BIT(13)
+#define PCIE_B1C0_MODE_SEL BIT(2)
+#define PCIE_B0_B1_TSYNCEN BIT(0)
+
+struct dra7xx_pcie {
+ struct dw_pcie *pci;
+ void __iomem *base; /* DT ti_conf */
+ int phy_count; /* DT phy-names count */
+ struct phy **phy;
+ struct irq_domain *irq_domain;
+ struct clk *clk;
+ enum dw_pcie_device_mode mode;
+};
+
+struct dra7xx_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ u32 b1co_mode_sel_mask;
+};
+
+#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
+
+static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
+{
+ return readl(pcie->base + offset);
+}
+
+static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
+ u32 value)
+{
+ writel(value, pcie->base + offset);
+}
+
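+/*
+ * Note (added for clarity): the DRA7xx wrapper only passes the low 28
+ * bits (a 256 MB window) of a CPU address through to the PCIe side, so
+ * outbound addresses are masked with DRA7XX_CPU_TO_BUS_ADDR before the
+ * DesignWare core programs them into the ATU.
+ */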
+static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+{
+ return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+}
+
+static int dra7xx_pcie_link_up(struct dw_pcie *pci)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+
+ return !!(reg & LINK_UP);
+}
+
+static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ u32 reg;
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg &= ~LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+}
+
+static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ struct device *dev = pci->dev;
+ u32 reg;
+
+ if (dw_pcie_link_up(pci)) {
+ dev_err(dev, "link is already up\n");
+ return 0;
+ }
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg |= LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+ return 0;
+}
+
+static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
+ LEG_EP_INTERRUPTS | MSI);
+
+ dra7xx_pcie_writel(dra7xx,
+ PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+ MSI | LEG_EP_INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+ INTERRUPTS);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
+ INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+ dra7xx_pcie_enable_msi_interrupts(dra7xx);
+}
+
+static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+ dra7xx_pcie_enable_interrupts(dra7xx);
+
+ return 0;
+}
+
+static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = dra7xx_pcie_intx_map,
+ .xlate = pci_irqd_intx_xlate,
+};
+
+static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned long val;
+ int pos;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (index * MSI_REG_CTRL_BLOCK_SIZE));
+ if (!val)
+ return 0;
+
+ pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL);
+ while (pos != MAX_MSI_IRQS_PER_CTRL) {
+ generic_handle_domain_irq(pp->irq_domain,
+ (index * MAX_MSI_IRQS_PER_CTRL) + pos);
+ pos++;
+ pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
+ }
+
+ return 1;
+}
+
+static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret, i, count, num_ctrls;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /*
+ * Need to make sure all MSI status bits read 0 before exiting.
+ * Otherwise, new MSI IRQs are not registered by the wrapper. Put an
+ * upper bound on the loop and exit the handler in case of an IRQ
+ * flood to avoid locking up the system in interrupt context.
+ */
+ count = 0;
+ do {
+ ret = 0;
+
+ for (i = 0; i < num_ctrls; i++)
+ ret |= dra7xx_pcie_handle_msi(pp, i);
+ count++;
+ } while (ret && count <= 1000);
+
+ if (count > 1000)
+ dev_warn_ratelimited(pci->dev,
+ "Too many MSI IRQs to handle\n");
+}
+
+static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dra7xx_pcie *dra7xx;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ unsigned long reg;
+ u32 bit;
+
+ chained_irq_enter(chip, desc);
+
+ pp = irq_desc_get_handler_data(desc);
+ pci = to_dw_pcie_from_pp(pp);
+ dra7xx = to_dra7xx_pcie(pci);
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
+
+ switch (reg) {
+ case MSI:
+ dra7xx_pcie_handle_msi_irq(pp);
+ break;
+ case INTA:
+ case INTB:
+ case INTC:
+ case INTD:
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(dra7xx->irq_domain, bit);
+ break;
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
+{
+ struct dra7xx_pcie *dra7xx = arg;
+ struct dw_pcie *pci = dra7xx->pci;
+ struct device *dev = pci->dev;
+ struct dw_pcie_ep *ep = &pci->ep;
+ u32 reg;
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
+
+ if (reg & ERR_SYS)
+ dev_dbg(dev, "System Error\n");
+
+ if (reg & ERR_FATAL)
+ dev_dbg(dev, "Fatal Error\n");
+
+ if (reg & ERR_NONFATAL)
+ dev_dbg(dev, "Non Fatal Error\n");
+
+ if (reg & ERR_COR)
+ dev_dbg(dev, "Correctable Error\n");
+
+ if (reg & ERR_AXI)
+ dev_dbg(dev, "AXI tag lookup fatal Error\n");
+
+ if (reg & ERR_ECRC)
+ dev_dbg(dev, "ECRC Error\n");
+
+ if (reg & PME_TURN_OFF)
+ dev_dbg(dev,
+ "Power Management Event Turn-Off message received\n");
+
+ if (reg & PME_TO_ACK)
+ dev_dbg(dev,
+ "Power Management Turn-Off Ack message received\n");
+
+ if (reg & PM_PME)
+ dev_dbg(dev, "PM Power Management Event message received\n");
+
+ if (reg & LINK_REQ_RST)
+ dev_dbg(dev, "Link Request Reset\n");
+
+ if (reg & LINK_UP_EVT) {
+ if (dra7xx->mode == DW_PCIE_EP_TYPE)
+ dw_pcie_ep_linkup(ep);
+ dev_dbg(dev, "Link-up state change\n");
+ }
+
+ if (reg & CFG_BME_EVT)
+ dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
+
+ if (reg & CFG_MSE_EVT)
+ dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
+
+ return IRQ_HANDLED;
+}
+
+static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
+ pp);
+ dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, pp);
+ of_node_put(pcie_intc_node);
+ if (!dra7xx->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+ .host_init = dra7xx_pcie_host_init,
+};
+
+static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+
+ dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+}
+
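+/*
+ * INTx is signalled by pulsing the wrapper's assert and then deassert
+ * trigger registers; the 1 ms gap presumably keeps the interrupt
+ * asserted long enough for the host to sample it.
+ */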
+static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
+ mdelay(1);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
+}
+
+static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
+ u8 interrupt_num)
+{
+ u32 reg;
+
+ reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
+ reg |= MSI_REQ_GRANT;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
+}
+
+static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ dra7xx_pcie_raise_legacy_irq(dra7xx);
+ break;
+ case PCI_EPC_IRQ_MSI:
+ dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
+ break;
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features dra7xx_pcie_epc_features = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features*
+dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &dra7xx_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = dra7xx_pcie_ep_init,
+ .raise_irq = dra7xx_pcie_raise_irq,
+ .get_features = dra7xx_pcie_get_features,
+};
+
+static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = dra7xx->pci;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics");
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ pci->dbi_base2 =
+ devm_platform_ioremap_resource_byname(pdev, "ep_dbics2");
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie *pci = dra7xx->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ /* MSI IRQ is muxed */
+ pp->msi_irq[0] = -ENODEV;
+
+ ret = dra7xx_pcie_init_irq_domain(pp);
+ if (ret < 0)
+ return ret;
+
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics");
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ pp->ops = &dra7xx_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
+ .start_link = dra7xx_pcie_establish_link,
+ .stop_link = dra7xx_pcie_stop_link,
+ .link_up = dra7xx_pcie_link_up,
+};
+
+static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
+{
+ int phy_count = dra7xx->phy_count;
+
+ while (phy_count--) {
+ phy_power_off(dra7xx->phy[phy_count]);
+ phy_exit(dra7xx->phy[phy_count]);
+ }
+}
+
+static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
+{
+ int phy_count = dra7xx->phy_count;
+ int ret;
+ int i;
+
+ for (i = 0; i < phy_count; i++) {
+ ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_init(dra7xx->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(dra7xx->phy[i]);
+ if (ret < 0) {
+ phy_exit(dra7xx->phy[i]);
+ goto err_phy;
+ }
+ }
+
+ return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(dra7xx->phy[i]);
+ phy_exit(dra7xx->phy[i]);
+ }
+
+ return ret;
+}
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
+ .b1co_mode_sel_mask = BIT(2),
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
+ .b1co_mode_sel_mask = GENMASK(3, 2),
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
+ .b1co_mode_sel_mask = BIT(2),
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
+ .b1co_mode_sel_mask = GENMASK(3, 2),
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+ {
+ .compatible = "ti,dra7-pcie",
+ .data = &dra7xx_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra7-pcie-ep",
+ .data = &dra7xx_pcie_ep_of_data,
+ },
+ {
+ .compatible = "ti,dra746-pcie-rc",
+ .data = &dra746_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra726-pcie-rc",
+ .data = &dra726_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra746-pcie-ep",
+ .data = &dra746_pcie_ep_of_data,
+ },
+ {
+ .compatible = "ti,dra726-pcie-ep",
+ .data = &dra726_pcie_ep_of_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
+
+/*
+ * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * @dra7xx: the dra7xx device where the workaround should be applied
+ *
+ * Accesses to the PCIe slave port that are not 32-bit aligned result
+ * in an incorrect mapping to the TLP address and byte-enable fields.
+ * Byte and half-word accesses are therefore not possible at byte
+ * offsets 0x1, 0x2, and 0x3.
+ *
+ * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
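+ *
+ * The property carries two extra cells, the register offset and the
+ * bit mask to set, e.g. (illustrative values only):
+ *   ti,syscon-unaligned-access = <&scm_conf 0x14 0x2>;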
+ */
+static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
+{
+ int ret;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ struct regmap *regmap;
+
+ regmap = syscon_regmap_lookup_by_phandle(np,
+ "ti,syscon-unaligned-access");
+ if (IS_ERR(regmap)) {
+ dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
+ return -EINVAL;
+ }
+
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
+ 2, 0, &args);
+ if (ret) {
+ dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(regmap, args.args[0], args.args[1],
+ args.args[1]);
+ if (ret)
+ dev_err(dev, "failed to enable unaligned access\n");
+
+ of_node_put(args.np);
+
+ return ret;
+}
+
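+/*
+ * For x2 operation, set the B1C0 lane-mode select and the B0/B1 sync
+ * enable bits in the control module; the register is reached through
+ * the "ti,syscon-lane-sel" phandle plus a register offset.
+ */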
+static int dra7xx_pcie_configure_two_lane(struct device *dev,
+ u32 b1co_mode_sel_mask)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *pcie_syscon;
+ unsigned int pcie_reg;
+ u32 mask;
+ u32 val;
+
+ pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+ if (IS_ERR(pcie_syscon)) {
+ dev_err(dev, "unable to get ti,syscon-lane-sel\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
+ &pcie_reg)) {
+ dev_err(dev, "couldn't get lane selection reg offset\n");
+ return -EINVAL;
+ }
+
+ mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
+ val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
+ regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_probe(struct platform_device *pdev)
+{
+ u32 reg;
+ int ret;
+ int irq;
+ int i;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
+ void __iomem *base;
+ struct dw_pcie *pci;
+ struct dra7xx_pcie *dra7xx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ char name[10];
+ struct gpio_desc *reset;
+ const struct dra7xx_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+ u32 b1co_mode_sel_mask;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+ b1co_mode_sel_mask = data->b1co_mode_sel_mask;
+
+ dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
+ if (!dra7xx)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ phy_count = of_property_count_strings(np, "phy-names");
+ if (phy_count < 0) {
+ dev_err(dev, "unable to find the strings\n");
+ return phy_count;
+ }
+
+ phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ dra7xx->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(dra7xx->clk))
+ return dev_err_probe(dev, PTR_ERR(dra7xx->clk),
+ "clock request failed");
+
+ ret = clk_prepare_enable(dra7xx->clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < phy_count; i++) {
+ snprintf(name, sizeof(name), "pcie-phy%d", i);
+ phy[i] = devm_phy_get(dev, name);
+ if (IS_ERR(phy[i]))
+ return PTR_ERR(phy[i]);
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
+ }
+
+ dra7xx->base = base;
+ dra7xx->phy = phy;
+ dra7xx->pci = pci;
+ dra7xx->phy_count = phy_count;
+
+ if (phy_count == 2) {
+ ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
+ if (ret < 0)
+ dra7xx->phy_count = 1; /* Fallback to x1 lane mode */
+ }
+
+ ret = dra7xx_pcie_enable_phy(dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, dra7xx);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
+
+ reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
+ if (IS_ERR(reset)) {
+ ret = PTR_ERR(reset);
+ dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
+ goto err_gpio;
+ }
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg &= ~LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
+ ret = -ENODEV;
+ goto err_gpio;
+ }
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+ DEVICE_TYPE_RC);
+
+ ret = dra7xx_pcie_unaligned_memaccess(dev);
+ if (ret)
+ dev_err(dev, "WA for Errata i870 not applied\n");
+
+ ret = dra7xx_add_pcie_port(dra7xx, pdev);
+ if (ret < 0)
+ goto err_gpio;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
+ ret = -ENODEV;
+ goto err_gpio;
+ }
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+ DEVICE_TYPE_EP);
+
+ ret = dra7xx_pcie_unaligned_memaccess(dev);
+ if (ret)
+ goto err_gpio;
+
+ ret = dra7xx_add_pcie_ep(dra7xx, pdev);
+ if (ret < 0)
+ goto err_gpio;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ }
+ dra7xx->mode = mode;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
+ IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_gpio;
+ }
+
+ return 0;
+
+err_gpio:
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ dra7xx_pcie_disable_phy(dra7xx);
+
+err_link:
+ while (--i >= 0)
+ device_link_del(link[i]);
+
+ return ret;
+}
+
+static int dra7xx_pcie_suspend(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ struct dw_pcie *pci = dra7xx->pci;
+ u32 val;
+
+ if (dra7xx->mode != DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* clear MSE */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ val &= ~PCI_COMMAND_MEMORY;
+ dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_resume(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ struct dw_pcie *pci = dra7xx->pci;
+ u32 val;
+
+ if (dra7xx->mode != DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* set MSE */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ val |= PCI_COMMAND_MEMORY;
+ dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_suspend_noirq(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+
+ dra7xx_pcie_disable_phy(dra7xx);
+
+ return 0;
+}
+
+static int dra7xx_pcie_resume_noirq(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dra7xx_pcie_enable_phy(dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dra7xx_pcie_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int ret;
+
+ dra7xx_pcie_stop_link(dra7xx->pci);
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+ dra7xx_pcie_disable_phy(dra7xx);
+
+ clk_disable_unprepare(dra7xx->clk);
+}
+
+static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+ dra7xx_pcie_resume_noirq)
+};
+
+static struct platform_driver dra7xx_pcie_driver = {
+ .probe = dra7xx_pcie_probe,
+ .driver = {
+ .name = "dra7-pcie",
+ .of_match_table = of_dra7xx_pcie_match,
+ .suppress_bind_attrs = true,
+ .pm = &dra7xx_pcie_pm_ops,
+ },
+ .shutdown = dra7xx_pcie_shutdown,
+};
+module_platform_driver(dra7xx_pcie_driver);
+
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_DESCRIPTION("PCIe controller driver for TI DRA7xx SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
new file mode 100644
index 000000000..c6bede346
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Samsung Exynos SoCs
+ *
+ * Copyright (C) 2013-2020 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ * Jaehoon Chung <jh80.chung@samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "pcie-designware.h"
+
+#define to_exynos_pcie(x) dev_get_drvdata((x)->dev)
+
+/* PCIe ELBI registers */
+#define PCIE_IRQ_PULSE 0x000
+#define IRQ_INTA_ASSERT BIT(0)
+#define IRQ_INTB_ASSERT BIT(2)
+#define IRQ_INTC_ASSERT BIT(4)
+#define IRQ_INTD_ASSERT BIT(6)
+#define PCIE_IRQ_LEVEL 0x004
+#define PCIE_IRQ_SPECIAL 0x008
+#define PCIE_IRQ_EN_PULSE 0x00c
+#define PCIE_IRQ_EN_LEVEL 0x010
+#define PCIE_IRQ_EN_SPECIAL 0x014
+#define PCIE_SW_WAKE 0x018
+#define PCIE_BUS_EN BIT(1)
+#define PCIE_CORE_RESET 0x01c
+#define PCIE_CORE_RESET_ENABLE BIT(0)
+#define PCIE_STICKY_RESET 0x020
+#define PCIE_NONSTICKY_RESET 0x024
+#define PCIE_APP_INIT_RESET 0x028
+#define PCIE_APP_LTSSM_ENABLE 0x02c
+#define PCIE_ELBI_RDLH_LINKUP 0x074
+#define PCIE_ELBI_XMLH_LINKUP BIT(4)
+#define PCIE_ELBI_LTSSM_ENABLE 0x1
+#define PCIE_ELBI_SLV_AWMISC 0x11c
+#define PCIE_ELBI_SLV_ARMISC 0x120
+#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
+
+struct exynos_pcie {
+ struct dw_pcie pci;
+ void __iomem *elbi_base;
+ struct clk *clk;
+ struct clk *bus_clk;
+ struct phy *phy;
+ struct regulator_bulk_data supplies[2];
+};
+
+static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
+{
+ struct device *dev = ep->pci.dev;
+ int ret;
+
+ ret = clk_prepare_enable(ep->clk);
+ if (ret) {
+ dev_err(dev, "cannot enable pcie rc clock");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ep->bus_clk);
+ if (ret) {
+ dev_err(dev, "cannot enable pcie bus clock");
+ goto err_bus_clk;
+ }
+
+ return 0;
+
+err_bus_clk:
+ clk_disable_unprepare(ep->clk);
+
+ return ret;
+}
+
+static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
+{
+ clk_disable_unprepare(ep->bus_clk);
+ clk_disable_unprepare(ep->clk);
+}
+
+static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
+{
+ writel(val, base + reg);
+}
+
+static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
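+/*
+ * DBI accesses on Exynos go through the ELBI slave port: setting
+ * PCIE_ELBI_SLV_DBI_ENABLE in the AW/AR MISC register steers the
+ * following write/read to the DBI space instead of the default slave
+ * target, so the mode is toggled around each DBI access.
+ */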
+static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
+{
+ u32 val;
+
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC);
+ if (on)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+}
+
+static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
+{
+ u32 val;
+
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC);
+ if (on)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+}
+
+static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
+{
+ u32 val;
+
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val &= ~PCIE_CORE_RESET_ENABLE;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET);
+}
+
+static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
+{
+ u32 val;
+
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val |= PCIE_CORE_RESET_ENABLE;
+
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_STICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET);
+}
+
+static int exynos_pcie_start_link(struct dw_pcie *pci)
+{
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+ u32 val;
+
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE);
+ val &= ~PCIE_BUS_EN;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE);
+
+ /* assert LTSSM enable */
+ exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+ PCIE_APP_LTSSM_ENABLE);
+ return 0;
+}
+
+static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
+{
+ u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE);
+
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE);
+}
+
+static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
+{
+ struct exynos_pcie *ep = arg;
+
+ exynos_pcie_clear_irq_pulse(ep);
+ return IRQ_HANDLED;
+}
+
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
+{
+ u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+ IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
+
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
+}
+
+static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+ u32 val;
+
+ exynos_pcie_sideband_dbi_r_mode(ep, true);
+ dw_pcie_read(base + reg, size, &val);
+ exynos_pcie_sideband_dbi_r_mode(ep, false);
+ return val;
+}
+
+static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+
+ exynos_pcie_sideband_dbi_w_mode(ep, true);
+ dw_pcie_write(base + reg, size, val);
+ exynos_pcie_sideband_dbi_w_mode(ep, false);
+}
+
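+/*
+ * Config accesses to the root bus are served from the DBI; only device
+ * 0 exists there, so any other slot is reported as not found.
+ */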
+static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int exynos_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops exynos_pci_ops = {
+ .read = exynos_pcie_rd_own_conf,
+ .write = exynos_pcie_wr_own_conf,
+};
+
+static int exynos_pcie_link_up(struct dw_pcie *pci)
+{
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+ u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
+
+ return (val & PCIE_ELBI_XMLH_LINKUP);
+}
+
+static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+
+ pp->bridge->ops = &exynos_pci_ops;
+
+ exynos_pcie_assert_core_reset(ep);
+
+ phy_init(ep->phy);
+ phy_power_on(ep->phy);
+
+ exynos_pcie_deassert_core_reset(ep);
+ exynos_pcie_enable_irq_pulse(ep);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
+ .host_init = exynos_pcie_host_init,
+};
+
+static int exynos_add_pcie_port(struct exynos_pcie *ep,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &ep->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
+ IRQF_SHARED, "exynos-pcie", ep);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ pp->ops = &exynos_pcie_host_ops;
+ pp->msi_irq[0] = -ENODEV;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .read_dbi = exynos_pcie_read_dbi,
+ .write_dbi = exynos_pcie_write_dbi,
+ .link_up = exynos_pcie_link_up,
+ .start_link = exynos_pcie_start_link,
+};
+
+static int exynos_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_pcie *ep;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->pci.dev = dev;
+ ep->pci.ops = &dw_pcie_ops;
+
+ ep->phy = devm_of_phy_get(dev, np, NULL);
+ if (IS_ERR(ep->phy))
+ return PTR_ERR(ep->phy);
+
+ /* External Local Bus interface (ELBI) registers */
+ ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(ep->elbi_base))
+ return PTR_ERR(ep->elbi_base);
+
+ ep->clk = devm_clk_get(dev, "pcie");
+ if (IS_ERR(ep->clk)) {
+ dev_err(dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(ep->clk);
+ }
+
+ ep->bus_clk = devm_clk_get(dev, "pcie_bus");
+ if (IS_ERR(ep->bus_clk)) {
+ dev_err(dev, "Failed to get pcie bus clock\n");
+ return PTR_ERR(ep->bus_clk);
+ }
+
+ ep->supplies[0].supply = "vdd18";
+ ep->supplies[1].supply = "vdd10";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ep->supplies),
+ ep->supplies);
+ if (ret)
+ return ret;
+
+ ret = exynos_pcie_init_clk_resources(ep);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, ep);
+
+ ret = exynos_add_pcie_port(ep, pdev);
+ if (ret < 0)
+ goto fail_probe;
+
+ return 0;
+
+fail_probe:
+ phy_exit(ep->phy);
+ exynos_pcie_deinit_clk_resources(ep);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+
+ return ret;
+}
+
+static int exynos_pcie_remove(struct platform_device *pdev)
+{
+ struct exynos_pcie *ep = platform_get_drvdata(pdev);
+
+ dw_pcie_host_deinit(&ep->pci.pp);
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ exynos_pcie_deinit_clk_resources(ep);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+
+ return 0;
+}
+
+static int exynos_pcie_suspend_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+
+ return 0;
+}
+
+static int exynos_pcie_resume_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &ep->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
+ /* exynos_pcie_host_init controls ep->phy */
+ exynos_pcie_host_init(pp);
+ dw_pcie_setup_rc(pp);
+ exynos_pcie_start_link(pci);
+ return dw_pcie_wait_for_link(pci);
+}
+
+static const struct dev_pm_ops exynos_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+ exynos_pcie_resume_noirq)
+};
+
+static const struct of_device_id exynos_pcie_of_match[] = {
+ { .compatible = "samsung,exynos5433-pcie", },
+ { },
+};
+
+static struct platform_driver exynos_pcie_driver = {
+ .probe = exynos_pcie_probe,
+ .remove = exynos_pcie_remove,
+ .driver = {
+ .name = "exynos-pcie",
+ .of_match_table = exynos_pcie_of_match,
+ .pm = &exynos_pcie_pm_ops,
+ },
+};
+module_platform_driver(exynos_pcie_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
new file mode 100644
index 000000000..74703362a
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -0,0 +1,1612 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ * https://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-designware.h"
+
+#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
+#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
+#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
+#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
+
+#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
+
+enum imx6_pcie_variants {
+ IMX6Q,
+ IMX6SX,
+ IMX6QP,
+ IMX7D,
+ IMX8MQ,
+ IMX8MM,
+ IMX8MP,
+ IMX8MQ_EP,
+ IMX8MM_EP,
+ IMX8MP_EP,
+};
+
+#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
+#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
+#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+
+struct imx6_pcie_drvdata {
+ enum imx6_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
+ u32 flags;
+ int dbi_length;
+ const char *gpr;
+};
+
+struct imx6_pcie {
+ struct dw_pcie *pci;
+ int reset_gpio;
+ bool gpio_active_high;
+ bool link_is_up;
+ struct clk *pcie_bus;
+ struct clk *pcie_phy;
+ struct clk *pcie_inbound_axi;
+ struct clk *pcie;
+ struct clk *pcie_aux;
+ struct regmap *iomuxc_gpr;
+ u16 msi_ctrl;
+ u32 controller_id;
+ struct reset_control *pciephy_reset;
+ struct reset_control *apps_reset;
+ struct reset_control *turnoff_reset;
+ u32 tx_deemph_gen1;
+ u32 tx_deemph_gen2_3p5db;
+ u32 tx_deemph_gen2_6db;
+ u32 tx_swing_full;
+ u32 tx_swing_low;
+ struct regulator *vpcie;
+ struct regulator *vph;
+ void __iomem *phy_base;
+
+ /* power domain for pcie */
+ struct device *pd_pcie;
+ /* power domain for pcie phy */
+ struct device *pd_pcie_phy;
+ struct phy *phy;
+ const struct imx6_pcie_drvdata *drvdata;
+};
+
+/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
+#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
+#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define PCIE_PHY_CTRL_CAP_ADR BIT(16)
+#define PCIE_PHY_CTRL_CAP_DAT BIT(17)
+#define PCIE_PHY_CTRL_WR BIT(18)
+#define PCIE_PHY_CTRL_RD BIT(19)
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK BIT(16)
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_ATEOVRD 0x10
+#define PCIE_PHY_ATEOVRD_EN BIT(2)
+#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
+#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
+
+#define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
+#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
+#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
+#define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9)
+
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
+
+/* iMX7 PCIe PHY registers */
+#define PCIE_PHY_CMN_REG4 0x14
+/* These are probably the bits that *aren't* DCC_FB_EN */
+#define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29
+
+#define PCIE_PHY_CMN_REG15 0x54
+#define PCIE_PHY_CMN_REG15_DLY_4 BIT(2)
+#define PCIE_PHY_CMN_REG15_PLL_PD BIT(5)
+#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7)
+
+#define PCIE_PHY_CMN_REG24 0x90
+#define PCIE_PHY_CMN_REG24_RX_EQ BIT(6)
+#define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3)
+
+#define PCIE_PHY_CMN_REG26 0x98
+#define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
+
+static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+{
+ WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
+ imx6_pcie->drvdata->variant != IMX8MQ_EP &&
+ imx6_pcie->drvdata->variant != IMX8MM &&
+ imx6_pcie->drvdata->variant != IMX8MM_EP &&
+ imx6_pcie->drvdata->variant != IMX8MP &&
+ imx6_pcie->drvdata->variant != IMX8MP_EP);
+ return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
+static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+{
+ unsigned int mask, val, mode;
+
+ if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+ mode = PCI_EXP_TYPE_ENDPOINT;
+ else
+ mode = PCI_EXP_TYPE_ROOT_PORT;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ if (imx6_pcie->controller_id == 1) {
+ mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+ val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ mode);
+ } else {
+ mask = IMX6Q_GPR12_DEVICE_TYPE;
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
+ }
+ break;
+ default:
+ mask = IMX6Q_GPR12_DEVICE_TYPE;
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
+ break;
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+}
+
+static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ bool val;
+ u32 max_iterations = 10;
+ u32 wait_counter = 0;
+
+ do {
+ val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
+ PCIE_PHY_STAT_ACK;
+ wait_counter++;
+
+ if (val == exp_val)
+ return 0;
+
+ udelay(1);
+ } while (wait_counter < max_iterations);
+
+ return -ETIMEDOUT;
+}
+
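+/*
+ * The PHY control registers are bit-banged through PCIE_PHY_CTRL: put
+ * the address/data on the DATA field, raise the matching capture or
+ * read/write strobe, wait for PCIE_PHY_STAT_ACK, then drop the strobe
+ * and wait for the ack to clear again.
+ */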
+static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ u32 val;
+ int ret;
+
+ val = PCIE_PHY_CTRL_DATA(addr);
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+ val |= PCIE_PHY_CTRL_CAP_ADR;
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
+ if (ret)
+ return ret;
+
+ val = PCIE_PHY_CTRL_DATA(addr);
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+ return pcie_phy_poll_ack(imx6_pcie, false);
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ u32 phy_ctl;
+ int ret;
+
+ ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ if (ret)
+ return ret;
+
+ /* assert Read signal */
+ phy_ctl = PCIE_PHY_CTRL_RD;
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
+
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
+ if (ret)
+ return ret;
+
+ *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
+
+ /* deassert Read signal */
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
+
+ return pcie_phy_poll_ack(imx6_pcie, false);
+}
+
+static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ u32 var;
+ int ret;
+
+ /* write the address and have the PHY capture it */
+ ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ if (ret)
+ return ret;
+
+ var = PCIE_PHY_CTRL_DATA(data);
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+ /* capture data */
+ var |= PCIE_PHY_CTRL_CAP_DAT;
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
+ if (ret)
+ return ret;
+
+ /* deassert cap data */
+ var = PCIE_PHY_CTRL_DATA(data);
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(imx6_pcie, false);
+ if (ret)
+ return ret;
+
+ /* assert wr signal */
+ var = PCIE_PHY_CTRL_WR;
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+ /* wait for ack */
+ ret = pcie_phy_poll_ack(imx6_pcie, true);
+ if (ret)
+ return ret;
+
+ /* deassert wr signal */
+ var = PCIE_PHY_CTRL_DATA(data);
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(imx6_pcie, false);
+ if (ret)
+ return ret;
+
+ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);
+
+ return 0;
+}
+
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ /*
+ * PHY initialization is done by the standalone PHY
+ * driver; nothing more to do here.
+ */
+ break;
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ /*
+ * TODO: Currently this code assumes an external
+ * oscillator is being used.
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+ * According to the datasheet, PCIE_VPH should be 1.8V.
+ * If PCIE_VPH is instead supplied with 3.3V, VREG_BYPASS
+ * must be cleared to zero.
+ */
+ if (imx6_pcie->vph &&
+ regulator_get_voltage(imx6_pcie->vph) > 3000000)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+ break;
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+ IMX6SX_GPR12_PCIE_RX_EQ_2);
+ fallthrough;
+ default:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
+ break;
+ }
+
+ imx6_pcie_configure_type(imx6_pcie);
+}
+
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+ u32 val;
+ struct device *dev = imx6_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ IOMUXC_GPR22, val,
+ val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT))
+ dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+ unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ int mult, div;
+ u16 val;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ return 0;
+
+ switch (phy_rate) {
+ case 125000000:
+ /*
+ * The default settings of the MPLL are for a 125MHz input
+ * clock, so no need to reconfigure anything in that case.
+ */
+ return 0;
+ case 100000000:
+ mult = 25;
+ div = 0;
+ break;
+ case 200000000:
+ mult = 25;
+ div = 1;
+ break;
+ default:
+ dev_err(imx6_pcie->pci->dev,
+ "Unsupported PHY reference clock rate %lu\n", phy_rate);
+ return -EINVAL;
+ }
+
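+ /*
+ * Both supported rates target the same MPLL frequency:
+ * 100 MHz * 25 and 200 MHz / 2 * 25 both give 2.5 GHz.
+ */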
+ pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+ PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+ val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+ val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+ PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+ val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+ val |= PCIE_PHY_ATEOVRD_EN;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+ return 0;
+}
+
+static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+{
+ u16 tmp;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ return;
+
+ pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+
+ usleep_range(2000, 3000);
+
+ pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+}
+
+#ifdef CONFIG_ARM
+/* Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ unsigned long pc = instruction_pointer(regs);
+ unsigned long instr = *(unsigned long *)pc;
+ int reg = (instr >> 12) & 15;
+
+ /*
+ * If the instruction being executed was a read,
+ * make it look like it read all-ones.
+ */
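+ /* This mask matches the single-data-transfer encoding with L (load) set. */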
+ if ((instr & 0x0c100000) == 0x04100000) {
+ unsigned long val;
+
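+ /* Bit 22 is the byte (B) flag: LDRB reads fake 0xff, word reads all-ones. */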
+ if (instr & 0x00400000)
+ val = 255;
+ else
+ val = -1;
+
+ regs->uregs[reg] = val;
+ regs->ARM_pc += 4;
+ return 0;
+ }
+
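+ /*
+ * Halfword/signed loads (LDRH/LDRSH/LDRSB) match this encoding;
+ * fault them to all-ones as well.
+ */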
+ if ((instr & 0x0e100090) == 0x00100090) {
+ regs->uregs[reg] = -1;
+ regs->ARM_pc += 4;
+ return 0;
+ }
+
+ return 1;
+}
+#endif
+
+static int imx6_pcie_attach_pd(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct device_link *link;
+
+ /* Do nothing when in a single power domain */
+ if (dev->pm_domain)
+ return 0;
+
+ imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx6_pcie->pd_pcie))
+ return PTR_ERR(imx6_pcie->pd_pcie);
+ /* Do nothing when power domain missing */
+ if (!imx6_pcie->pd_pcie)
+ return 0;
+ link = device_link_add(dev, imx6_pcie->pd_pcie,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ return -EINVAL;
+ }
+
+ imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pd_pcie_phy))
+ return PTR_ERR(imx6_pcie->pd_pcie_phy);
+
+ link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+ unsigned int offset;
+ int ret = 0;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_axi clock\n");
+ break;
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
+ break;
+ case IMX6QP:
+ case IMX6Q:
+ /* power up core phy and enable ref clock */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ /*
+ * The async reset input needs the ref clock to sync internally.
+ * When the ref clock only arrives after reset, the internally
+ * synced reset time is too short to meet the requirement, so
+ * add a ~10us delay here.
+ */
+ usleep_range(10, 100);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+ break;
+ case IMX7D:
+ break;
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ ret = clk_prepare_enable(imx6_pcie->pcie_aux);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ break;
+ }
+
+ offset = imx6_pcie_grp_offset(imx6_pcie);
+ /*
+ * Drive the CLK_REQ override value low and enable the
+ * override to make sure that REF_CLK is turned on.
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
+ break;
+ }
+
+ return ret;
+}
+
+static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
+{
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ break;
+ case IMX6QP:
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD,
+ IMX6Q_GPR1_PCIE_TEST_PD);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ break;
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ clk_disable_unprepare(imx6_pcie->pcie_aux);
+ break;
+ default:
+ break;
+ }
+}
+
+static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_phy);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_phy clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_bus);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_bus clock\n");
+ goto err_pcie_bus;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie clock\n");
+ goto err_pcie;
+ }
+
+ ret = imx6_pcie_enable_ref_clk(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie ref clock\n");
+ goto err_ref_clk;
+ }
+
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+ return 0;
+
+err_ref_clk:
+ clk_disable_unprepare(imx6_pcie->pcie);
+err_pcie:
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+
+ return ret;
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+ imx6_pcie_disable_ref_clk(imx6_pcie);
+ clk_disable_unprepare(imx6_pcie->pcie);
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+}
+
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX7D:
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ fallthrough;
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST,
+ IMX6Q_GPR1_PCIE_SW_RST);
+ break;
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+ break;
+ }
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio))
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
+}
+
+static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+ break;
+ case IMX7D:
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+
+ /*
+ * Workaround for ERR010728: failure of the PCIe PLL VCO to
+ * oscillate, especially when cold. This turns off the
+ * "Duty-cycle Corrector" and other undocumented settings.
+ */
+ if (likely(imx6_pcie->phy_base)) {
+ /* De-assert DCC_FB_EN */
+ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
+ /* Assert RX_EQS and RX_EQS_SEL */
+ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
+ | PCIE_PHY_CMN_REG24_RX_EQ,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
+ /* Assert ATT_MODE */
+ writel(PCIE_PHY_CMN_REG26_ATT_MODE,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
+ } else {
+ dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle?\n");
+ }
+
+ imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
+ break;
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST, 0);
+
+ usleep_range(200, 500);
+ break;
+ case IMX6Q: /* Nothing to do */
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ break;
+ }
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ msleep(100);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ !imx6_pcie->gpio_active_high);
+ /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ msleep(100);
+ }
+
+ return 0;
+}
+
+static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 tmp;
+ unsigned int retries;
+
+ for (retries = 0; retries < 200; retries++) {
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ /* Test if the speed change finished. */
+ if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
+ return 0;
+ usleep_range(100, 1000);
+ }
+
+ dev_err(dev, "Speed change timeout\n");
+ return -ETIMEDOUT;
+}
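+
+/*
+ * With 200 iterations and a 100-1000 us sleep per iteration, the loop
+ * above gives the speed change roughly 20-200 ms to complete.
+ */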
+
+static void imx6_pcie_ltssm_enable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6Q:
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2,
+ IMX6Q_GPR12_PCIE_CTL_2);
+ break;
+ case IMX7D:
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ reset_control_deassert(imx6_pcie->apps_reset);
+ break;
+ }
+}
+
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6Q:
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0);
+ break;
+ case IMX7D:
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ }
+}
+
+static int imx6_pcie_start_link(struct dw_pcie *pci)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct device *dev = pci->dev;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 tmp;
+ int ret;
+
+ /*
+ * Force Gen1 operation when starting the link. In case the link is
+ * started in Gen2 mode, there is a possibility the devices on the
+ * bus will not be detected at all. This happens with PCIe switches.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ /* Start LTSSM. */
+ imx6_pcie_ltssm_enable(dev);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
+
+ if (pci->link_gen > 1) {
+ /* Allow faster modes after the link is up */
+ dw_pcie_dbi_ro_wr_en(pci);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= pci->link_gen;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+
+ /*
+ * Start Directed Speed Change so the best possible
+ * speed both link partners support can be negotiated.
+ */
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ if (imx6_pcie->drvdata->flags &
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
+ /*
+ * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
+ * from the i.MX6 family when no link speed transition
+ * occurs, i.e. when we stay at Gen1. In that case the
+ * bit is not cleared by HW on i.MX7, which would make
+ * the wait below report a false failure; hence the
+ * wait is only performed on the i.MX6 family.
+ */
+
+ ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
+ goto err_reset_phy;
+ }
+ }
+
+ /* Make sure link training is finished as well! */
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
+ } else {
+ dev_info(dev, "Link: Only Gen1 is enabled\n");
+ }
+
+ imx6_pcie->link_is_up = true;
+ tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+ dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
+ return 0;
+
+err_reset_phy:
+ imx6_pcie->link_is_up = false;
+ dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
+ imx6_pcie_reset_phy(imx6_pcie);
+ return 0;
+}
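+
+/*
+ * Illustrative example of the flow above: with fsl,max-link-speed = <2>
+ * in DT, LNKCAP.SLS is first capped at 2.5 GT/s for the initial
+ * training, then raised back to Gen2 and a directed speed change is
+ * triggered once the link is up.
+ */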
+
+static void imx6_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+
+ /* Turn off PCIe LTSSM */
+ imx6_pcie_ltssm_disable(dev);
+}
+
+static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ int ret;
+
+ if (imx6_pcie->vpcie) {
+ ret = regulator_enable(imx6_pcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie regulator: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ imx6_pcie_assert_core_reset(imx6_pcie);
+ imx6_pcie_init_phy(imx6_pcie);
+
+ ret = imx6_pcie_clk_enable(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
+ goto err_reg_disable;
+ }
+
+ if (imx6_pcie->phy) {
+ ret = phy_init(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "pcie PHY init failed: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ ret = phy_power_on(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "pcie PHY power on failed: %d\n", ret);
+ goto err_phy_off;
+ }
+ }
+
+ ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ if (ret < 0) {
+ dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+ goto err_phy_off;
+ }
+
+ imx6_setup_phy_mpll(imx6_pcie);
+
+ return 0;
+
+err_phy_off:
+ if (imx6_pcie->phy)
+ phy_exit(imx6_pcie->phy);
+err_clk_disable:
+ imx6_pcie_clk_disable(imx6_pcie);
+err_reg_disable:
+ if (imx6_pcie->vpcie)
+ regulator_disable(imx6_pcie->vpcie);
+ return ret;
+}
+
+static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ if (imx6_pcie->phy) {
+ if (phy_power_off(imx6_pcie->phy))
+ dev_err(pci->dev, "unable to power off PHY\n");
+ phy_exit(imx6_pcie->phy);
+ }
+ imx6_pcie_clk_disable(imx6_pcie);
+
+ if (imx6_pcie->vpcie)
+ regulator_disable(imx6_pcie->vpcie);
+}
+
+static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
+ .host_init = imx6_pcie_host_init,
+ .host_deinit = imx6_pcie_host_exit,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = imx6_pcie_start_link,
+ .stop_link = imx6_pcie_stop_link,
+};
+
+static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ enum pci_barno bar;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features imx8m_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_1 | 1 << BAR_3,
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features*
+imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &imx8m_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = imx6_pcie_ep_init,
+ .raise_irq = imx6_pcie_ep_raise_irq,
+ .get_features = imx6_pcie_ep_get_features,
+};
+
+static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
+ struct platform_device *pdev)
+{
+ int ret;
+ unsigned int pcie_dbi2_offset;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+
+ imx6_pcie_host_init(pp);
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ_EP:
+ case IMX8MM_EP:
+ case IMX8MP_EP:
+ pcie_dbi2_offset = SZ_1M;
+ break;
+ default:
+ pcie_dbi2_offset = SZ_4K;
+ break;
+ }
+ pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+ ep->page_size = SZ_64K;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+ /* Start LTSSM. */
+ imx6_pcie_ltssm_enable(dev);
+
+ return 0;
+}
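+
+/*
+ * Note: dbi_base2 addresses the DWC shadow registers (e.g. the BAR mask
+ * registers written via dw_pcie_ep_reset_bar()); per the switch above it
+ * sits 1 MB above dbi on the i.MX8M EP variants and 4 KB above it
+ * otherwise.
+ */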
+
+static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+{
+ struct device *dev = imx6_pcie->pci->dev;
+
+ /* Some variants have a turnoff reset in DT */
+ if (imx6_pcie->turnoff_reset) {
+ reset_control_assert(imx6_pcie->turnoff_reset);
+ reset_control_deassert(imx6_pcie->turnoff_reset);
+ goto pm_turnoff_sleep;
+ }
+
+ /* Others poke directly at IOMUXC registers */
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
+ break;
+ default:
+ dev_err(dev, "PME_Turn_Off not implemented\n");
+ return;
+ }
+
+ /*
+ * Components with an upstream port must respond to
+ * PME_Turn_Off with PME_TO_Ack but we can't check.
+ *
+ * The standard recommends a 1-10ms timeout after which to
+ * proceed anyway as if acks were received.
+ */
+pm_turnoff_sleep:
+ usleep_range(1000, 10000);
+}
+
+static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save)
+{
+ u8 offset;
+ u16 val;
+ struct dw_pcie *pci = imx6_pcie->pci;
+
+ if (pci_msi_enabled()) {
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ if (save) {
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
+ imx6_pcie->msi_ctrl = val;
+ } else {
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = imx6_pcie->msi_ctrl;
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+ }
+}
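+
+/*
+ * Only PCI_MSI_FLAGS is saved/restored here, presumably because the
+ * core loses its DBI register state while powered down; the rest of
+ * the RC configuration is rebuilt by dw_pcie_setup_rc() on resume.
+ */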
+
+static int imx6_pcie_suspend_noirq(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ return 0;
+
+ imx6_pcie_msi_save_restore(imx6_pcie, true);
+ imx6_pcie_pm_turnoff(imx6_pcie);
+ imx6_pcie_stop_link(imx6_pcie->pci);
+ imx6_pcie_host_exit(pp);
+
+ return 0;
+}
+
+static int imx6_pcie_resume_noirq(struct device *dev)
+{
+ int ret;
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ return 0;
+
+ ret = imx6_pcie_host_init(pp);
+ if (ret)
+ return ret;
+ imx6_pcie_msi_save_restore(imx6_pcie, false);
+ dw_pcie_setup_rc(pp);
+
+ if (imx6_pcie->link_is_up)
+ imx6_pcie_start_link(imx6_pcie->pci);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx6_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+ imx6_pcie_resume_noirq)
+};
+
+static int imx6_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct imx6_pcie *imx6_pcie;
+ struct device_node *np;
+ struct resource *dbi_base;
+ struct device_node *node = dev->of_node;
+ int ret;
+ u16 val;
+
+ imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
+ if (!imx6_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &imx6_pcie_host_ops;
+
+ imx6_pcie->pci = pci;
+ imx6_pcie->drvdata = of_device_get_match_data(dev);
+
+ /* Find the PHY if one is defined; only i.MX7D uses it */
+ np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
+ if (np) {
+ struct resource res;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(dev, "Unable to map PCIe PHY\n");
+ return ret;
+ }
+ imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx6_pcie->phy_base))
+ return PTR_ERR(imx6_pcie->phy_base);
+ }
+
+ pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ /* Fetch GPIOs */
+ imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
+ imx6_pcie->gpio_active_high = of_property_read_bool(node,
+ "reset-gpio-active-high");
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high ?
+ GPIOF_OUT_INIT_HIGH :
+ GPIOF_OUT_INIT_LOW,
+ "PCIe reset");
+ if (ret) {
+ dev_err(dev, "unable to get reset gpio\n");
+ return ret;
+ }
+ } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
+ return imx6_pcie->reset_gpio;
+ }
+
+ /* Fetch clocks */
+ imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
+ if (IS_ERR(imx6_pcie->pcie_bus))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
+ "pcie_bus clock source missing or invalid\n");
+
+ imx6_pcie->pcie = devm_clk_get(dev, "pcie");
+ if (IS_ERR(imx6_pcie->pcie))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
+ "pcie clock source missing or invalid\n");
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
+ "pcie_inbound_axi");
+ if (IS_ERR(imx6_pcie->pcie_inbound_axi))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
+ "pcie_inbound_axi clock missing or invalid\n");
+ break;
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(imx6_pcie->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+ fallthrough;
+ case IMX7D:
+ if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
+ imx6_pcie->controller_id = 1;
+
+ imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
+ "pciephy");
+ if (IS_ERR(imx6_pcie->pciephy_reset)) {
+ dev_err(dev, "Failed to get PCIEPHY reset control\n");
+ return PTR_ERR(imx6_pcie->pciephy_reset);
+ }
+
+ imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
+ "apps");
+ if (IS_ERR(imx6_pcie->apps_reset)) {
+ dev_err(dev, "Failed to get PCIE APPS reset control\n");
+ return PTR_ERR(imx6_pcie->apps_reset);
+ }
+ break;
+ case IMX8MM:
+ case IMX8MM_EP:
+ case IMX8MP:
+ case IMX8MP_EP:
+ imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(imx6_pcie->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+ imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
+ "apps");
+ if (IS_ERR(imx6_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
+ "failed to get pcie apps reset control\n");
+
+ imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx6_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ "failed to get pcie phy\n");
+
+ break;
+ default:
+ break;
+ }
+ /* Don't fetch the pcie_phy clock if an abstract PHY driver is used */
+ if (imx6_pcie->phy == NULL) {
+ imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pcie_phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
+ "pcie_phy clock source missing or invalid\n");
+ }
+
+ /* Grab turnoff reset */
+ imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
+ if (IS_ERR(imx6_pcie->turnoff_reset)) {
+ dev_err(dev, "Failed to get TURNOFF reset control\n");
+ return PTR_ERR(imx6_pcie->turnoff_reset);
+ }
+
+ /* Grab GPR config register range */
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(dev, "unable to find iomuxc registers\n");
+ return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ }
+
+ /* Grab PCIe PHY Tx Settings */
+ if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
+ &imx6_pcie->tx_deemph_gen1))
+ imx6_pcie->tx_deemph_gen1 = 0;
+
+ if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
+ &imx6_pcie->tx_deemph_gen2_3p5db))
+ imx6_pcie->tx_deemph_gen2_3p5db = 0;
+
+ if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
+ &imx6_pcie->tx_deemph_gen2_6db))
+ imx6_pcie->tx_deemph_gen2_6db = 20;
+
+ if (of_property_read_u32(node, "fsl,tx-swing-full",
+ &imx6_pcie->tx_swing_full))
+ imx6_pcie->tx_swing_full = 127;
+
+ if (of_property_read_u32(node, "fsl,tx-swing-low",
+ &imx6_pcie->tx_swing_low))
+ imx6_pcie->tx_swing_low = 127;
+
+ /* Limit link speed */
+ pci->link_gen = 1;
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
+
+ imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx6_pcie->vpcie)) {
+ if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
+ return PTR_ERR(imx6_pcie->vpcie);
+ imx6_pcie->vpcie = NULL;
+ }
+
+ imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx6_pcie->vph)) {
+ if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx6_pcie->vph);
+ imx6_pcie->vph = NULL;
+ }
+
+ platform_set_drvdata(pdev, imx6_pcie);
+
+ ret = imx6_pcie_attach_pd(dev);
+ if (ret)
+ return ret;
+
+ if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
+ ret = imx6_add_pcie_ep(imx6_pcie, pdev);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0)
+ return ret;
+
+ if (pci_msi_enabled()) {
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ }
+ }
+
+ return 0;
+}
+
+static void imx6_pcie_shutdown(struct platform_device *pdev)
+{
+ struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+
+ /* Bring the link down so the bootloader gets a clean state on reboot */
+ imx6_pcie_assert_core_reset(imx6_pcie);
+}
+
+static const struct imx6_pcie_drvdata drvdata[] = {
+ [IMX6Q] = {
+ .variant = IMX6Q,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY |
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ .dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
+ },
+ [IMX6SX] = {
+ .variant = IMX6SX,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY |
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
+ },
+ [IMX6QP] = {
+ .variant = IMX6QP,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY |
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
+ },
+ [IMX7D] = {
+ .variant = IMX7D,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx7d-iomuxc-gpr",
+ },
+ [IMX8MQ] = {
+ .variant = IMX8MQ,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
+ },
+ [IMX8MM] = {
+ .variant = IMX8MM,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ },
+ [IMX8MP] = {
+ .variant = IMX8MP,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
+ },
+ [IMX8MQ_EP] = {
+ .variant = IMX8MQ_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
+ },
+ [IMX8MM_EP] = {
+ .variant = IMX8MM_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ },
+ [IMX8MP_EP] = {
+ .variant = IMX8MP_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
+ },
+};
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+ { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
+ { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
+ { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
+ { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
+ { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
+ { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
+ { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
+ { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
+ { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ {},
+};
+
+static struct platform_driver imx6_pcie_driver = {
+ .driver = {
+ .name = "imx6q-pcie",
+ .of_match_table = imx6_pcie_of_match,
+ .suppress_bind_attrs = true,
+ .pm = &imx6_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = imx6_pcie_probe,
+ .shutdown = imx6_pcie_shutdown,
+};
+
+static void imx6_pcie_quirk(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ struct dw_pcie_rp *pp = bus->sysdata;
+
+ /* The bus's parent is the PCI bridge; its parent is this platform driver */
+ if (!bus->dev.parent || !bus->dev.parent->parent)
+ return;
+
+ /* Make sure we only quirk devices associated with this driver */
+ if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
+ return;
+
+ if (pci_is_root_bus(bus)) {
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ /*
+ * Limit config length to avoid the kernel reading beyond
+ * the register set and causing an abort on i.MX 6Quad
+ */
+ if (imx6_pcie->drvdata->dbi_length) {
+ dev->cfg_size = imx6_pcie->drvdata->dbi_length;
+ dev_info(&dev->dev, "Limiting cfg_size to %d\n",
+ dev->cfg_size);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
+ PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+
+static int __init imx6_pcie_init(void)
+{
+#ifdef CONFIG_ARM
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, imx6_pcie_of_match);
+ if (!np)
+ return -ENODEV;
+ of_node_put(np);
+
+ /*
+ * Since probe() can be deferred, we need to make sure that
+ * hook_fault_code() is not called after __init memory is freed
+ * by the kernel. Since imx6q_pcie_abort_handler() does not
+ * depend on any driver state, we can install the handler here
+ * without risking it accessing uninitialized driver state.
+ */
+ hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
+ "external abort on non-linefetch");
+#endif
+
+ return platform_driver_register(&imx6_pcie_driver);
+}
+device_initcall(imx6_pcie_init);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
new file mode 100644
index 000000000..cf3836561
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -0,0 +1,1338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Texas Instruments Keystone SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments, Ltd.
+ * https://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ * Implementation based on pci-exynos.c and pcie-designware.c
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define PCIE_VENDORID_MASK 0xffff
+#define PCIE_DEVICEID_SHIFT 16
+
+/* Application registers */
+#define CMD_STATUS 0x004
+#define LTSSM_EN_VAL BIT(0)
+#define OB_XLAT_EN_VAL BIT(1)
+#define DBI_CS2 BIT(5)
+
+#define CFG_SETUP 0x008
+#define CFG_BUS(x) (((x) & 0xff) << 16)
+#define CFG_DEVICE(x) (((x) & 0x1f) << 8)
+#define CFG_FUNC(x) ((x) & 0x7)
+#define CFG_TYPE1 BIT(24)
+
+#define OB_SIZE 0x030
+#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
+#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
+#define OB_ENABLEN BIT(0)
+#define OB_WIN_SIZE 8 /* 8MB */
+
+#define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1)))
+#define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1)))
+#define PCIE_EP_IRQ_SET 0x64
+#define PCIE_EP_IRQ_CLR 0x68
+#define INT_ENABLE BIT(0)
+
+/* IRQ register defines */
+#define IRQ_EOI 0x050
+
+#define MSI_IRQ 0x054
+#define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4))
+#define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4))
+#define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4))
+#define MSI_IRQ_OFFSET 4
+
+#define IRQ_STATUS(n) (0x184 + ((n) << 4))
+#define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4))
+#define INTx_EN BIT(0)
+
+#define ERR_IRQ_STATUS 0x1c4
+#define ERR_IRQ_ENABLE_SET 0x1c8
+#define ERR_AER BIT(5) /* ECRC error */
+#define AM6_ERR_AER BIT(4) /* AM6 ECRC error */
+#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
+#define ERR_CORR BIT(3) /* Correctable error */
+#define ERR_NONFATAL BIT(2) /* Non-fatal error */
+#define ERR_FATAL BIT(1) /* Fatal error */
+#define ERR_SYS BIT(0) /* System error */
+#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
+ ERR_NONFATAL | ERR_FATAL | ERR_SYS)
+
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK 0xb008
+#define PCIE_RC_K2E 0xb009
+#define PCIE_RC_K2L 0xb00a
+#define PCIE_RC_K2G 0xb00b
+
+#define KS_PCIE_DEV_TYPE_MASK (0x3 << 1)
+#define KS_PCIE_DEV_TYPE(mode) ((mode) << 1)
+
+#define EP 0x0
+#define LEG_EP 0x1
+#define RC 0x2
+
+#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
+
+#define AM654_PCIE_DEV_TYPE_MASK 0x3
+#define AM654_WIN_SIZE SZ_64K
+
+#define APP_ADDR_SPACE_0 (16 * SZ_1K)
+
+#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+
+struct ks_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+ u32 version;
+};
+
+struct keystone_pcie {
+ struct dw_pcie *pci;
+ /* PCI Device ID */
+ u32 device_id;
+ int legacy_host_irqs[PCI_NUM_INTX];
+ struct device_node *legacy_intc_np;
+
+ int msi_host_irq;
+ int num_lanes;
+ u32 num_viewport;
+ struct phy **phy;
+ struct device_link **link;
+ struct device_node *msi_intc_np;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np;
+
+ /* Application register space */
+ void __iomem *va_app_base; /* DT 1st resource */
+ struct resource app;
+ bool is_am6;
+};
+
+static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+ return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
+ u32 val)
+{
+ writel(val, ks_pcie->va_app_base + offset);
+}
+
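+/*
+ * The 32 MSI vectors are spread across eight 4-bit status/enable
+ * registers: vector v maps to register (v % 8), bit (v / 8). For
+ * example, vector 13 is bit 1 of MSI_IRQ_STATUS(5).
+ */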
+static void ks_pcie_msi_irq_ack(struct irq_data *data)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
+ BIT(bit_pos));
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+}
+
+static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ struct dw_pcie *pci;
+ u64 msi_target;
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ msi_target = ks_pcie->app.start + MSI_IRQ;
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
+ msg->data = data->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void ks_pcie_msi_mask(struct irq_data *data)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
+ BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void ks_pcie_msi_unmask(struct irq_data *data)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
+ BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static struct irq_chip ks_pcie_msi_irq_chip = {
+ .name = "KEYSTONE-PCI-MSI",
+ .irq_ack = ks_pcie_msi_irq_ack,
+ .irq_compose_msi_msg = ks_pcie_compose_msi_msg,
+ .irq_set_affinity = ks_pcie_msi_set_affinity,
+ .irq_mask = ks_pcie_msi_mask,
+ .irq_unmask = ks_pcie_msi_unmask,
+};
+
+static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
+ return dw_pcie_allocate_domains(pp);
+}
+
+static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
+ int offset)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 pending;
+
+ pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
+
+ if (BIT(0) & pending) {
+ dev_dbg(dev, "irq: irq_offset %d\n", offset);
+ generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
+ }
+
+ /* EOI the INTx interrupt */
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
+}
+
+static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
+{
+ ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
+}
+
+static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
+{
+ u32 reg;
+ struct device *dev = ks_pcie->pci->dev;
+
+ reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
+ if (!reg)
+ return IRQ_NONE;
+
+ if (reg & ERR_SYS)
+ dev_err(dev, "System Error\n");
+
+ if (reg & ERR_FATAL)
+ dev_err(dev, "Fatal Error\n");
+
+ if (reg & ERR_NONFATAL)
+ dev_dbg(dev, "Non Fatal Error\n");
+
+ if (reg & ERR_CORR)
+ dev_dbg(dev, "Correctable Error\n");
+
+ if (!ks_pcie->is_am6 && (reg & ERR_AXI))
+ dev_err(dev, "AXI tag lookup fatal Error\n");
+
+ if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
+ dev_err(dev, "ECRC Error\n");
+
+ ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
+
+ return IRQ_HANDLED;
+}
+
+static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_pcie_legacy_irq_chip = {
+ .name = "Keystone-PCI-Legacy-IRQ",
+ .irq_ack = ks_pcie_ack_legacy_irq,
+ .irq_mask = ks_pcie_mask_legacy_irq,
+ .irq_unmask = ks_pcie_unmask_legacy_irq,
+};
+
+static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+ .map = ks_pcie_init_legacy_irq_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 crosses a clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 crosses a clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
+
+static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+ u32 num_viewport = ks_pcie->num_viewport;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ u64 start, end;
+ struct resource *mem;
+ int i;
+
+ mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ start = mem->start;
+ end = mem->end;
+
+ /* Disable BARs for inbound access */
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ if (ks_pcie->is_am6)
+ return;
+
+ val = ilog2(OB_WIN_SIZE);
+ ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
+
+ /* Using Direct 1:1 mapping of RC <-> PCI memory space */
+ for (i = 0; i < num_viewport && (start < end); i++) {
+ ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
+ lower_32_bits(start) | OB_ENABLEN);
+ ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
+ upper_32_bits(start));
+ start += OB_WIN_SIZE * SZ_1M;
+ }
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= OB_XLAT_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
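+
+/*
+ * Worked example of the outbound mapping above (illustrative numbers):
+ * with num_viewport = 2 and a bridge memory window starting at
+ * 0x60000000, window 0 covers 0x60000000-0x607fffff and window 1 covers
+ * 0x60800000-0x60ffffff, each OB_WIN_SIZE (8 MB) long and mapped 1:1 to
+ * PCI addresses.
+ */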
+
+static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 reg;
+
+ reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+ CFG_FUNC(PCI_FUNC(devfn));
+ if (!pci_is_root_bus(bus->parent))
+ reg |= CFG_TYPE1;
+ ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+ return pp->va_cfg0_base + where;
+}
+
+static struct pci_ops ks_child_pcie_ops = {
+ .map_bus = ks_pcie_other_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/**
+ * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
+ * @bus: A pointer to the PCI bus structure.
+ *
+ * This sets BAR0 to enable inbound access to the MSI_IRQ register.
+ */
+static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ if (!pci_is_root_bus(bus))
+ return 0;
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 and program a 4K BAR mask */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting the bus address for inbound writes (MSI) should
+ * be sufficient. Use the physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
+ return 0;
+}
+
+static struct pci_ops ks_pcie_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .add_bus = ks_pcie_v3_65_add_bus,
+};
+
+/**
+ * ks_pcie_link_up() - Check if link up
+ * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
+ * controller driver information.
+ */
+static int ks_pcie_link_up(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+ val &= PORT_LOGIC_LTSSM_STATE_MASK;
+ return (val == PORT_LOGIC_LTSSM_STATE_L0);
+}
+
+static void ks_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 val;
+
+ /* Disable Link training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~LTSSM_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
+static int ks_pcie_start_link(struct dw_pcie *pci)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 val;
+
+ /* Initiate Link Training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+
+ return 0;
+}
+
+static void ks_pcie_quirk(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ struct pci_dev *bridge;
+ static const struct pci_device_id rc_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { 0, },
+ };
+
+ if (pci_is_root_bus(bus))
+ bridge = dev;
+
+ /* look for the host bridge */
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ bus = bus->parent;
+ }
+
+ if (!bridge)
+ return;
+
+ /*
+ * The Keystone PCI controller has a h/w limitation of a
+ * 256-byte maximum read request size. It can't handle
+ * anything larger, so force this limit on all downstream
+ * devices.
+ */
+ if (pci_match_id(rc_pci_devids, bridge)) {
+ if (pcie_get_readrq(dev) > 256) {
+ dev_info(&dev->dev, "limiting MRRS to 256\n");
+ pcie_set_readrq(dev, 256);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
+
+static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ unsigned int irq = desc->irq_data.hwirq;
+ struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+ u32 offset = irq - ks_pcie->msi_host_irq;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 vector, reg, pos;
+
+ dev_dbg(dev, "%s, irq %d\n", __func__, irq);
+
+ /*
+ * The chained irq handler installation replaces the normal
+ * interrupt driver handler, so we need to take care of the
+ * mask/unmask and ack operations ourselves.
+ */
+ chained_irq_enter(chip, desc);
+
+ reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
+ /*
+ * MSI0 status bits 0-3 report vectors 0, 8, 16, 24; MSI1 status
+ * bits report vectors 1, 9, 17, 25; and so forth.
+ */
+ for (pos = 0; pos < 4; pos++) {
+ if (!(reg & BIT(pos)))
+ continue;
+
+ vector = offset + (pos << 3);
+ dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
+ generic_handle_domain_irq(pp->irq_domain, vector);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+/**
+ * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * @desc: Pointer to irq descriptor
+ *
+ * Traverse the pending legacy interrupts and invoke the handler for each.
+ * Also takes care of the interrupt-controller-level mask/ack operation.
+ */
+static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ dev_dbg(dev, ": Handling legacy irq %d\n", irq);
+
+ /*
+ * The chained irq handler installation replaces the normal
+ * interrupt driver handler, so we need to take care of the
+ * mask/unmask and ack operations ourselves.
+ */
+ chained_irq_enter(chip, desc);
+ ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ chained_irq_exit(chip, desc);
+}
+
+static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
+{
+ struct device *dev = ks_pcie->pci->dev;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ struct irq_data *irq_data;
+ int irq_count, irq, ret, i;
+
+ if (!IS_ENABLED(CONFIG_PCI_MSI))
+ return 0;
+
+ intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
+ if (!intc_np) {
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "msi-interrupt-controller node is absent\n");
+ return -EINVAL;
+ }
+
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (!ks_pcie->msi_host_irq) {
+ irq_data = irq_get_irq_data(irq);
+ if (!irq_data) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->msi_host_irq = irq_data->hwirq;
+ }
+
+ irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
+ ks_pcie);
+ }
+
+ of_node_put(intc_np);
+ return 0;
+
+err:
+ of_node_put(intc_np);
+ return ret;
+}
+
+static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+ struct device *dev = ks_pcie->pci->dev;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ int irq_count, irq, ret = 0, i;
+
+ intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
+ if (!intc_np) {
+ /*
+ * Since legacy interrupts are modeled as edge-interrupts in
+ * AM6, keep it disabled for now.
+ */
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "legacy-interrupt-controller node is absent\n");
+ return -EINVAL;
+ }
+
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->legacy_host_irqs[i] = irq;
+
+ irq_set_chained_handler_and_data(irq,
+ ks_pcie_legacy_irq_handler,
+ ks_pcie);
+ }
+
+ legacy_irq_domain =
+ irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ &ks_pcie_legacy_irq_domain_ops, NULL);
+ if (!legacy_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->legacy_irq_domain = legacy_irq_domain;
+
+ for (i = 0; i < PCI_NUM_INTX; i++)
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
+
+err:
+ of_node_put(intc_np);
+ return ret;
+}
+
+#ifdef CONFIG_ARM
+/*
+ * When a PCI device does not exist during config cycles, the Keystone host
+ * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
+ * This handler always returns 0 for this kind of fault.
+ */
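+/*
+ * Only the extra load/store encodings (LDRH/LDRSB/LDRSH, matched by the
+ * 0x0e100090/0x00100090 test below) get their destination register fixed
+ * up to all-ones; other faulting accesses are simply acknowledged by
+ * returning 0.
+ */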
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ unsigned long instr = *(unsigned long *) instruction_pointer(regs);
+
+ if ((instr & 0x0e100090) == 0x00100090) {
+ int reg = (instr >> 12) & 15;
+
+ regs->uregs[reg] = -1;
+ regs->ARM_pc += 4;
+ }
+
+ return 0;
+}
+#endif
+
+static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+{
+ int ret;
+ unsigned int id;
+ struct regmap *devctrl_regs;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
+
+ devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
+ if (IS_ERR(devctrl_regs))
+ return PTR_ERR(devctrl_regs);
+
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ ret = regmap_read(devctrl_regs, offset, &id);
+ if (ret)
+ return ret;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ int ret;
+
+ pp->bridge->ops = &ks_pcie_ops;
+ if (!ks_pcie->is_am6)
+ pp->bridge->child_ops = &ks_child_pcie_ops;
+
+ ret = ks_pcie_config_legacy_irq(ks_pcie);
+ if (ret)
+ return ret;
+
+ ret = ks_pcie_config_msi_irq(ks_pcie);
+ if (ret)
+ return ret;
+
+ ks_pcie_stop_link(pci);
+ ks_pcie_setup_rc_app_regs(ks_pcie);
+ writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+ pci->dbi_base + PCI_IO_BASE);
+
+ ret = ks_pcie_init_id(ks_pcie);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_ARM
+ /*
+ * PCIe access errors that result in OCP errors are caught by the ARM
+ * core as "external aborts".
+ */
+ hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
+ "Asynchronous external abort");
+#endif
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops ks_pcie_host_ops = {
+ .host_init = ks_pcie_host_init,
+ .msi_host_init = ks_pcie_msi_host_init,
+};
+
+static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
+ .host_init = ks_pcie_host_init,
+};
+
+static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
+{
+ struct keystone_pcie *ks_pcie = priv;
+
+ return ks_pcie_handle_error_irq(ks_pcie);
+}
+
+static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_write(base + reg, size, val);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+}
+
+static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+ .start_link = ks_pcie_start_link,
+ .stop_link = ks_pcie_stop_link,
+ .link_up = ks_pcie_link_up,
+ .write_dbi2 = ks_pcie_am654_write_dbi2,
+};
+
+static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ int flags;
+
+ ep->page_size = AM654_WIN_SIZE;
+ flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+ dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
+}
+
+static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ u8 int_pin;
+
+ int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
+ if (int_pin == 0 || int_pin > 4)
+ return;
+
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
+ INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
+ mdelay(1);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
+ INT_ENABLE);
+}
+
+static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ ks_pcie_am654_raise_legacy_irq(ks_pcie);
+ break;
+ case PCI_EPC_IRQ_MSI:
+ dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ break;
+ case PCI_EPC_IRQ_MSIX:
+ dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ break;
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features ks_pcie_am654_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[2] = SZ_1M,
+ .bar_fixed_size[3] = SZ_64K,
+ .bar_fixed_size[4] = 256,
+ .bar_fixed_size[5] = SZ_1M,
+ .align = SZ_1M,
+};
+
+static const struct pci_epc_features*
+ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
+{
+ return &ks_pcie_am654_epc_features;
+}
+
+static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
+ .ep_init = ks_pcie_am654_ep_init,
+ .raise_irq = ks_pcie_am654_raise_irq,
+ .get_features = ks_pcie_am654_get_features,
+};
+
+static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
+{
+ int num_lanes = ks_pcie->num_lanes;
+
+ while (num_lanes--) {
+ phy_power_off(ks_pcie->phy[num_lanes]);
+ phy_exit(ks_pcie->phy[num_lanes]);
+ }
+}
+
+static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
+{
+ int i;
+ int ret;
+ int num_lanes = ks_pcie->num_lanes;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = phy_reset(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_init(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(ks_pcie->phy[i]);
+ if (ret < 0) {
+ phy_exit(ks_pcie->phy[i]);
+ goto err_phy;
+ }
+ }
+
+ return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(ks_pcie->phy[i]);
+ phy_exit(ks_pcie->phy[i]);
+ }
+
+ return ret;
+}
+
+static int ks_pcie_set_mode(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
+ val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
+
+ ret = regmap_update_bits(syscon, offset, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ks_pcie_am654_set_mode(struct device *dev,
+ enum dw_pcie_device_mode mode)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ mask = AM654_PCIE_DEV_TYPE_MASK;
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ val = RC;
+ break;
+ case DW_PCIE_EP_TYPE:
+ val = EP;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(syscon, offset, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
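+
+/*
+ * Illustrative DT fragment (labels and values are hypothetical) for the
+ * syscon lookup above; the optional fixed argument selects the register
+ * offset within the syscon:
+ *
+ *	pcie0_rc: pcie@5500000 {
+ *		ti,syscon-pcie-mode = <&pcie_devctrl 0x4060>;
+ *	};
+ */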
+
+static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
+ .host_ops = &ks_pcie_host_ops,
+ .version = DW_PCIE_VER_365A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
+ .host_ops = &ks_pcie_am654_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
+ .version = DW_PCIE_VER_490A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
+ .ep_ops = &ks_pcie_am654_ep_ops,
+ .mode = DW_PCIE_EP_TYPE,
+ .version = DW_PCIE_VER_490A,
+};
+
+static const struct of_device_id ks_pcie_of_match[] = {
+ {
+ .type = "pci",
+ .data = &ks_pcie_rc_of_data,
+ .compatible = "ti,keystone-pcie",
+ },
+ {
+ .data = &ks_pcie_am654_rc_of_data,
+ .compatible = "ti,am654-pcie-rc",
+ },
+ {
+ .data = &ks_pcie_am654_ep_of_data,
+ .compatible = "ti,am654-pcie-ep",
+ },
+ { },
+};
+
+static int ks_pcie_probe(struct platform_device *pdev)
+{
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct ks_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+ struct dw_pcie *pci;
+ struct keystone_pcie *ks_pcie;
+ struct device_link **link;
+ struct gpio_desc *gpiod;
+ struct resource *res;
+ void __iomem *base;
+ u32 num_viewport;
+ struct phy **phy;
+ u32 num_lanes;
+ char name[10];
+ u32 version;
+ int ret;
+ int irq;
+ int i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ version = data->version;
+ host_ops = data->host_ops;
+ ep_ops = data->ep_ops;
+ mode = data->mode;
+
+ ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
+ if (!ks_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ ks_pcie->app = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
+ base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
+ ks_pcie->is_am6 = true;
+
+ pci->dbi_base = base;
+ pci->dbi_base2 = base;
+ pci->dev = dev;
+ pci->ops = &ks_pcie_dw_pcie_ops;
+ pci->version = version;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+ "ks-pcie-error-irq", ks_pcie);
+ if (ret < 0) {
+ dev_err(dev, "failed to request error IRQ %d\n",
+ irq);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "num-lanes", &num_lanes);
+ if (ret)
+ num_lanes = 1;
+
+ phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ for (i = 0; i < num_lanes; i++) {
+ snprintf(name, sizeof(name), "pcie-phy%d", i);
+ phy[i] = devm_phy_optional_get(dev, name);
+ if (IS_ERR(phy[i])) {
+ ret = PTR_ERR(phy[i]);
+ goto err_link;
+ }
+
+ if (!phy[i])
+ continue;
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
+ }
+
+ ks_pcie->np = np;
+ ks_pcie->pci = pci;
+ ks_pcie->link = link;
+ ks_pcie->num_lanes = num_lanes;
+ ks_pcie->phy = phy;
+
+ gpiod = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset GPIO\n");
+ goto err_link;
+ }
+
+ /* Take runtime PM references on the PHYs while enabling them */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_get_sync(ks_pcie->phy[i]);
+
+ ret = ks_pcie_enable_phy(ks_pcie);
+
+ /* Drop the runtime PM references on the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_put_sync(ks_pcie->phy[i]);
+
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ goto err_link;
+ }
+
+ platform_set_drvdata(pdev, ks_pcie);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
+
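+ /*
+ * DWC 4.80a and newer controllers (AM654, 4.90a) use the AM654
+ * mode register layout; older Keystone parts (3.65a) use the
+ * legacy syscon layout.
+ */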
+ if (dw_pcie_ver_is_ge(pci, 480A))
+ ret = ks_pcie_am654_set_mode(dev, mode);
+ else
+ ret = ks_pcie_set_mode(dev);
+ if (ret < 0)
+ goto err_get_sync;
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-viewport* property\n");
+ goto err_get_sync;
+ }
+
+ /*
+ * "Power Sequencing and Reset Signal Timings" table in
+ * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
+ * indicates PERST# should be deasserted after a minimum of 100 us
+ * once REFCLK is stable. The REFCLK to the connector in RC
+ * mode is selected while enabling the PHY. So deassert PERST#
+ * after 100 us.
+ */
+ if (gpiod) {
+ usleep_range(100, 200);
+ gpiod_set_value_cansleep(gpiod, 1);
+ }
+
+ ks_pcie->num_viewport = num_viewport;
+ pci->pp.ops = host_ops;
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ pci->ep.ops = ep_ops;
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ }
+
+ ks_pcie_enable_error_irq(ks_pcie);
+
+ return 0;
+
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ ks_pcie_disable_phy(ks_pcie);
+
+err_link:
+ while (--i >= 0 && link[i])
+ device_link_del(link[i]);
+
+ return ret;
+}
+
+static int ks_pcie_remove(struct platform_device *pdev)
+{
+ struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ struct device_link **link = ks_pcie->link;
+ int num_lanes = ks_pcie->num_lanes;
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ ks_pcie_disable_phy(ks_pcie);
+ while (num_lanes--)
+ device_link_del(link[num_lanes]);
+
+ return 0;
+}
+
+static struct platform_driver ks_pcie_driver = {
+ .probe = ks_pcie_probe,
+ .remove = ks_pcie_remove,
+ .driver = {
+ .name = "keystone-pcie",
+ .of_match_table = ks_pcie_of_match,
+ },
+};
+builtin_platform_driver(ks_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
new file mode 100644
index 000000000..b1faf41a2
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe controller EP driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2018 NXP Semiconductor.
+ *
+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+#define PEX_PF0_CONFIG 0xC0014
+#define PEX_PF0_CFG_READY BIT(0)
+
+/* PEX PFa PCIE PME and message interrupt registers */
+#define PEX_PF0_PME_MES_DR 0xC0020
+#define PEX_PF0_PME_MES_DR_LUD BIT(7)
+#define PEX_PF0_PME_MES_DR_LDD BIT(9)
+#define PEX_PF0_PME_MES_DR_HRD BIT(10)
+
+#define PEX_PF0_PME_MES_IER 0xC0028
+#define PEX_PF0_PME_MES_IER_LUDIE BIT(7)
+#define PEX_PF0_PME_MES_IER_LDDIE BIT(9)
+#define PEX_PF0_PME_MES_IER_HRDIE BIT(10)
+
+#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+struct ls_pcie_ep_drvdata {
+ u32 func_offset;
+ const struct dw_pcie_ep_ops *ops;
+ const struct dw_pcie_ops *dw_pcie_ops;
+};
+
+struct ls_pcie_ep {
+ struct dw_pcie *pci;
+ struct pci_epc_features *ls_epc;
+ const struct ls_pcie_ep_drvdata *drvdata;
+ int irq;
+ u32 lnkcap;
+ bool big_endian;
+};
+
+static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pcie->big_endian)
+ return ioread32be(pci->dbi_base + offset);
+ else
+ return ioread32(pci->dbi_base + offset);
+}
+
+static void ls_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pcie->big_endian)
+ iowrite32be(value, pci->dbi_base + offset);
+ else
+ iowrite32(value, pci->dbi_base + offset);
+}
+
+static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
+{
+ struct ls_pcie_ep *pcie = dev_id;
+ struct dw_pcie *pci = pcie->pci;
+ u32 val, cfg;
+ u8 offset;
+
+ val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR);
+ ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
+
+ if (!val)
+ return IRQ_NONE;
+
+ if (val & PEX_PF0_PME_MES_DR_LUD) {
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ /*
+ * The values of the Maximum Link Width and Supported Link
+ * Speed from the Link Capabilities Register will be lost
+ * during link down or hot reset. Restore the initial value
+ * configured by the Reset Configuration Word (RCW).
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ cfg = ls_lut_readl(pcie, PEX_PF0_CONFIG);
+ cfg |= PEX_PF0_CFG_READY;
+ ls_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
+ dw_pcie_ep_linkup(&pci->ep);
+
+ dev_dbg(pci->dev, "Link up\n");
+ } else if (val & PEX_PF0_PME_MES_DR_LDD) {
+ dev_dbg(pci->dev, "Link down\n");
+ pci_epc_linkdown(pci->ep.epc);
+ } else if (val & PEX_PF0_PME_MES_DR_HRD) {
+ dev_dbg(pci->dev, "Hot reset\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie,
+ struct platform_device *pdev)
+{
+ u32 val;
+ int ret;
+
+ pcie->irq = platform_get_irq_byname(pdev, "pme");
+ if (pcie->irq < 0)
+ return pcie->irq;
+
+ ret = devm_request_irq(&pdev->dev, pcie->irq, ls_pcie_ep_event_handler,
+ IRQF_SHARED, pdev->name, pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register PCIe IRQ\n");
+ return ret;
+ }
+
+ /* Enable interrupts */
+ val = ls_lut_readl(pcie, PEX_PF0_PME_MES_IER);
+ val |= PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE |
+ PEX_PF0_PME_MES_IER_LUDIE;
+ ls_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
+
+ return 0;
+}
+
+static const struct pci_epc_features*
+ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ return pcie->ls_epc;
+}
+
+static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+ struct dw_pcie_ep_func *ep_func;
+ enum pci_barno bar;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, 0);
+ if (!ep_func)
+ return;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+
+ pcie->ls_epc->msi_capable = ep_func->msi_cap ? true : false;
+ pcie->ls_epc->msix_capable = ep_func->msix_cap ? true : false;
+}
+
+static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
+ interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ WARN_ON(func_no && !pcie->drvdata->func_offset);
+ return pcie->drvdata->func_offset * func_no;
+}
+
+static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
+ .ep_init = ls_pcie_ep_init,
+ .raise_irq = ls_pcie_ep_raise_irq,
+ .get_features = ls_pcie_ep_get_features,
+ .func_conf_select = ls_pcie_ep_func_conf_select,
+};
+
+static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
+ .ops = &ls_pcie_ep_ops,
+};
+
+static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
+ .func_offset = 0x20000,
+ .ops = &ls_pcie_ep_ops,
+};
+
+static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
+ .func_offset = 0x8000,
+ .ops = &ls_pcie_ep_ops,
+};
+
+static const struct of_device_id ls_pcie_ep_of_match[] = {
+ { .compatible = "fsl,ls1028a-pcie-ep", .data = &ls1_ep_drvdata },
+ { .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata },
+ { .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,lx2160ar2-pcie-ep", .data = &lx2_ep_drvdata },
+ { },
+};
+
+static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct ls_pcie_ep *pcie;
+ struct pci_epc_features *ls_epc;
+ struct resource *dbi_base;
+ u8 offset;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ ls_epc = devm_kzalloc(dev, sizeof(*ls_epc), GFP_KERNEL);
+ if (!ls_epc)
+ return -ENOMEM;
+
+ pcie->drvdata = of_device_get_match_data(dev);
+
+ pci->dev = dev;
+ pci->ops = pcie->drvdata->dw_pcie_ops;
+
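+ /* BARs 2 and 4 are fixed 64-bit BARs on these controllers. */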
+ ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
+ ls_epc->linkup_notifier = true;
+
+ pcie->pci = pci;
+ pcie->ls_epc = ls_epc;
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ pci->ep.ops = &ls_pcie_ep_ops;
+
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ platform_set_drvdata(pdev, pcie);
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ pcie->lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ return ls_pcie_ep_interrupt_init(pcie, pdev);
+}
+
+static struct platform_driver ls_pcie_ep_driver = {
+ .driver = {
+ .name = "layerscape-pcie-ep",
+ .of_match_table = ls_pcie_ep_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
new file mode 100644
index 000000000..b931d5976
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2014 Freescale Semiconductor.
+ * Copyright 2021 NXP
+ *
+ * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* PEX Internal Configuration Registers */
+#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
+#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
+
+/* PF Message Command Register */
+#define LS_PCIE_PF_MCR 0x2c
+#define PF_MCR_PTOMR BIT(0)
+#define PF_MCR_EXL2S BIT(1)
+
+#define PCIE_IATU_NUM 6
+
+struct ls_pcie_drvdata {
+ const u32 pf_off;
+ bool pm_support;
+};
+
+struct ls_pcie {
+ struct dw_pcie *pci;
+ const struct ls_pcie_drvdata *drvdata;
+ void __iomem *pf_base;
+ bool big_endian;
+};
+
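+/*
+ * Helper for readx_poll_timeout(): it takes only the register offset
+ * and picks up 'pcie' from the caller's scope.
+ */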
+#define ls_pcie_pf_readl_addr(addr) ls_pcie_pf_readl(pcie, addr)
+#define to_ls_pcie(x) dev_get_drvdata((x)->dev)
+
+static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u32 header_type;
+
+ header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE);
+ header_type &= 0x7f;
+
+ return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+/* Clear multi-function bit */
+static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE);
+}
+
+/* Drop MSG TLP except for Vendor MSG */
+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
+{
+ u32 val;
+ struct dw_pcie *pci = pcie->pci;
+
+ val = ioread32(pci->dbi_base + PCIE_STRFMR1);
+ val &= 0xDFFFFFFF;
+ iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
+}
+
+/* Forward error response of outbound non-posted requests */
+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
+}
+
+static u32 ls_pcie_pf_readl(struct ls_pcie *pcie, u32 off)
+{
+ if (pcie->big_endian)
+ return ioread32be(pcie->pf_base + off);
+
+ return ioread32(pcie->pf_base + off);
+}
+
+static void ls_pcie_pf_writel(struct ls_pcie *pcie, u32 off, u32 val)
+{
+ if (pcie->big_endian)
+ iowrite32be(val, pcie->pf_base + off);
+ else
+ iowrite32(val, pcie->pf_base + off);
+}
+
+static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+ int ret;
+
+ val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_PTOMR;
+ ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_PTOMR),
+ PCIE_PME_TO_L2_TIMEOUT_US / 10,
+ PCIE_PME_TO_L2_TIMEOUT_US);
+ if (ret)
+ dev_err(pcie->pci->dev, "PME_Turn_off timeout\n");
+}
+
+static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+ int ret;
+
+ /*
+ * Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link
+ * to exit L2 state.
+ */
+ val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_EXL2S;
+ ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ /*
+ * The 10 ms L2 exit timeout is not defined by the specification;
+ * it was chosen based on empirical observations.
+ */
+ ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_EXL2S),
+ 1000,
+ 10000);
+ if (ret)
+ dev_err(pcie->pci->dev, "L2 exit timeout\n");
+}
+
+static int ls_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ ls_pcie_fix_error_response(pcie);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ ls_pcie_clear_multifunction(pcie);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ ls_pcie_drop_msg_tlp(pcie);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops ls_pcie_host_ops = {
+ .host_init = ls_pcie_host_init,
+ .pme_turn_off = ls_pcie_send_turnoff_msg,
+};
+
+static const struct ls_pcie_drvdata ls1021a_drvdata = {
+ .pm_support = false,
+};
+
+static const struct ls_pcie_drvdata layerscape_drvdata = {
+ .pf_off = 0xc0000,
+ .pm_support = true,
+};
+
+static const struct of_device_id ls_pcie_of_match[] = {
+ { .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata },
+ { .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1021a_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2088a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1088a-pcie", .data = &layerscape_drvdata },
+ { },
+};
+
+static int ls_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct ls_pcie *pcie;
+ struct resource *dbi_base;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pcie->drvdata = of_device_get_match_data(dev);
+
+ pci->dev = dev;
+ pci->pp.ops = &ls_pcie_host_ops;
+
+ pcie->pci = pci;
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ pcie->pf_base = pci->dbi_base + pcie->drvdata->pf_off;
+
+ if (!ls_pcie_is_bridge(pcie))
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static int ls_pcie_suspend_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+
+ if (!pcie->drvdata->pm_support)
+ return 0;
+
+ return dw_pcie_suspend_noirq(pcie->pci);
+}
+
+static int ls_pcie_resume_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+
+ if (!pcie->drvdata->pm_support)
+ return 0;
+
+ ls_pcie_exit_from_l2(&pcie->pci->pp);
+
+ return dw_pcie_resume_noirq(pcie->pci);
+}
+
+static const struct dev_pm_ops ls_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(ls_pcie_suspend_noirq, ls_pcie_resume_noirq)
+};
+
+static struct platform_driver ls_pcie_driver = {
+ .probe = ls_pcie_probe,
+ .driver = {
+ .name = "layerscape-pcie",
+ .of_match_table = ls_pcie_of_match,
+ .suppress_bind_attrs = true,
+ .pm = &ls_pcie_pm_ops,
+ },
+};
+builtin_platform_driver(ls_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
new file mode 100644
index 000000000..407558f5d
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amlogic MESON SoCs
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Yue Wang <yue.wang@amlogic.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/phy/phy.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "pcie-designware.h"
+
+#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5)
+#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12)
+
+/* PCIe specific config registers */
+#define PCIE_CFG0 0x0
+#define APP_LTSSM_ENABLE BIT(7)
+
+#define PCIE_CFG_STATUS12 0x30
+#define IS_SMLH_LINK_UP(x) ((x) & (1 << 6))
+#define IS_RDLH_LINK_UP(x) ((x) & (1 << 16))
+#define IS_LTSSM_UP(x) ((((x) >> 10) & 0x1f) == 0x11)
+
+#define PCIE_CFG_STATUS17 0x44
+#define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1)
+
+#define WAIT_LINKUP_TIMEOUT 4000
+#define PORT_CLK_RATE 100000000UL
+#define MAX_PAYLOAD_SIZE 256
+#define MAX_READ_REQ_SIZE 256
+#define PCIE_RESET_DELAY 500
+#define PCIE_SHARED_RESET 1
+#define PCIE_NORMAL_RESET 0
+
+enum pcie_data_rate {
+ PCIE_GEN1,
+ PCIE_GEN2,
+ PCIE_GEN3,
+ PCIE_GEN4
+};
+
+struct meson_pcie_clk_res {
+ struct clk *clk;
+ struct clk *port_clk;
+ struct clk *general_clk;
+};
+
+struct meson_pcie_rc_reset {
+ struct reset_control *port;
+ struct reset_control *apb;
+};
+
+struct meson_pcie {
+ struct dw_pcie pci;
+ void __iomem *cfg_base;
+ struct meson_pcie_clk_res clk_res;
+ struct meson_pcie_rc_reset mrst;
+ struct gpio_desc *reset_gpio;
+ struct phy *phy;
+};
+
+static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
+ const char *id,
+ u32 reset_type)
+{
+ struct device *dev = mp->pci.dev;
+ struct reset_control *reset;
+
+ if (reset_type == PCIE_SHARED_RESET)
+ reset = devm_reset_control_get_shared(dev, id);
+ else
+ reset = devm_reset_control_get(dev, id);
+
+ return reset;
+}
+
+static int meson_pcie_get_resets(struct meson_pcie *mp)
+{
+ struct meson_pcie_rc_reset *mrst = &mp->mrst;
+
+ mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
+ if (IS_ERR(mrst->port))
+ return PTR_ERR(mrst->port);
+ reset_control_deassert(mrst->port);
+
+ mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET);
+ if (IS_ERR(mrst->apb))
+ return PTR_ERR(mrst->apb);
+ reset_control_deassert(mrst->apb);
+
+ return 0;
+}
+
+static int meson_pcie_get_mems(struct platform_device *pdev,
+ struct meson_pcie *mp)
+{
+ struct dw_pcie *pci = &mp->pci;
+
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(mp->cfg_base))
+ return PTR_ERR(mp->cfg_base);
+
+ return 0;
+}
+
+static int meson_pcie_power_on(struct meson_pcie *mp)
+{
+ int ret = 0;
+
+ ret = phy_init(mp->phy);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(mp->phy);
+ if (ret) {
+ phy_exit(mp->phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void meson_pcie_power_off(struct meson_pcie *mp)
+{
+ phy_power_off(mp->phy);
+ phy_exit(mp->phy);
+}
+
+static int meson_pcie_reset(struct meson_pcie *mp)
+{
+ struct meson_pcie_rc_reset *mrst = &mp->mrst;
+ int ret = 0;
+
+ ret = phy_reset(mp->phy);
+ if (ret)
+ return ret;
+
+ reset_control_assert(mrst->port);
+ reset_control_assert(mrst->apb);
+ udelay(PCIE_RESET_DELAY);
+ reset_control_deassert(mrst->port);
+ reset_control_deassert(mrst->apb);
+ udelay(PCIE_RESET_DELAY);
+
+ return 0;
+}
+
+static inline void meson_pcie_disable_clock(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static inline struct clk *meson_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret) {
+ dev_err(dev, "set clk rate failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clk\n");
+ return ERR_PTR(ret);
+ }
+
+ devm_add_action_or_reset(dev, meson_pcie_disable_clock, clk);
+
+ return clk;
+}
+
+static int meson_pcie_probe_clocks(struct meson_pcie *mp)
+{
+ struct device *dev = mp->pci.dev;
+ struct meson_pcie_clk_res *res = &mp->clk_res;
+
+ res->port_clk = meson_pcie_probe_clock(dev, "port", PORT_CLK_RATE);
+ if (IS_ERR(res->port_clk))
+ return PTR_ERR(res->port_clk);
+
+ res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
+ if (IS_ERR(res->general_clk))
+ return PTR_ERR(res->general_clk);
+
+ res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
+ if (IS_ERR(res->clk))
+ return PTR_ERR(res->clk);
+
+ return 0;
+}
+
+static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
+{
+ return readl(mp->cfg_base + reg);
+}
+
+static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
+{
+ writel(val, mp->cfg_base + reg);
+}
+
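+/* Pulse the reset GPIO: assert PERST#, hold for 500 us, then release. */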
+static void meson_pcie_assert_reset(struct meson_pcie *mp)
+{
+ gpiod_set_value_cansleep(mp->reset_gpio, 1);
+ udelay(500);
+ gpiod_set_value_cansleep(mp->reset_gpio, 0);
+}
+
+static void meson_pcie_ltssm_enable(struct meson_pcie *mp)
+{
+ u32 val;
+
+ val = meson_cfg_readl(mp, PCIE_CFG0);
+ val |= APP_LTSSM_ENABLE;
+ meson_cfg_writel(mp, val, PCIE_CFG0);
+}
+
+static int meson_size_to_payload(struct meson_pcie *mp, int size)
+{
+ struct device *dev = mp->pci.dev;
+
+ /*
+ * The DWC core supports payload sizes of 2^(val + 7) bytes, where val
+ * ranges from 0 to 5 and defaults to 1. If the requested size is not a
+ * power of two, or is below 2^7 or above 2^12, fall back to the
+ * default of 2^(1 + 7) = 256 bytes.
+ */
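+ /*
+ * For example, size = 256 is a power of two and in range, so the
+ * result is fls(256) - 8 = 9 - 8 = 1, i.e. 2^(1 + 7) = 256 bytes.
+ */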
+ if (!is_power_of_2(size) || size < 128 || size > 4096) {
+ dev_warn(dev, "payload size %d, set to default 256\n", size);
+ return 1;
+ }
+
+ return fls(size) - 8;
+}
+
+static void meson_set_max_payload(struct meson_pcie *mp, int size)
+{
+ struct dw_pcie *pci = &mp->pci;
+ u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ int max_payload_size = meson_size_to_payload(mp, size);
+
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
+
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
+}
+
+static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
+{
+ struct dw_pcie *pci = &mp->pci;
+ u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ int max_rd_req_size = meson_size_to_payload(mp, size);
+
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
+
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
+}
+
+static int meson_pcie_start_link(struct dw_pcie *pci)
+{
+ struct meson_pcie *mp = to_meson_pcie(pci);
+
+ meson_pcie_ltssm_enable(mp);
+ meson_pcie_assert_reset(mp);
+
+ return 0;
+}
+
+static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ int ret;
+
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ /*
+ * There is a bug in the MESON AXG PCIe controller whereby software
+ * cannot program the PCI_CLASS_DEVICE register, so we must fabricate
+ * the return value in the config accessors.
+ */
+ if ((where & ~3) == PCI_CLASS_REVISION) {
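+ /*
+ * For sub-dword reads, widen the value into its byte lane
+ * within the 32-bit register, patch the class code, then
+ * narrow it back down below.
+ */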
+ if (size <= 2)
+ *val = (*val & ((1 << (size * 8)) - 1)) << (8 * (where & 3));
+ *val &= ~0xffffff00;
+ *val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops meson_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = meson_pcie_rd_own_conf,
+ .write = pci_generic_config_write,
+};
+
+static int meson_pcie_link_up(struct dw_pcie *pci)
+{
+ struct meson_pcie *mp = to_meson_pcie(pci);
+ struct device *dev = pci->dev;
+ u32 speed_okay = 0;
+ u32 cnt = 0;
+ u32 state12, state17, smlh_up, ltssm_up, rdlh_up;
+
+ do {
+ state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
+ state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
+ smlh_up = IS_SMLH_LINK_UP(state12);
+ rdlh_up = IS_RDLH_LINK_UP(state12);
+ ltssm_up = IS_LTSSM_UP(state12);
+
+ if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
+ speed_okay = 1;
+
+ if (smlh_up)
+ dev_dbg(dev, "smlh_link_up is on\n");
+ if (rdlh_up)
+ dev_dbg(dev, "rdlh_link_up is on\n");
+ if (ltssm_up)
+ dev_dbg(dev, "ltssm_up is on\n");
+ if (speed_okay)
+ dev_dbg(dev, "speed_okay\n");
+
+ if (smlh_up && rdlh_up && ltssm_up && speed_okay)
+ return 1;
+
+ cnt++;
+
+ udelay(10);
+ } while (cnt < WAIT_LINKUP_TIMEOUT);
+
+ dev_err(dev, "error: wait linkup timeout\n");
+ return 0;
+}
+
+static int meson_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct meson_pcie *mp = to_meson_pcie(pci);
+
+ pp->bridge->ops = &meson_pci_ops;
+
+ meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+ meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops meson_pcie_host_ops = {
+ .host_init = meson_pcie_host_init,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = meson_pcie_link_up,
+ .start_link = meson_pcie_start_link,
+};
+
+static int meson_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct meson_pcie *mp;
+ int ret;
+
+ mp = devm_kzalloc(dev, sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ return -ENOMEM;
+
+ pci = &mp->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &meson_pcie_host_ops;
+ pci->num_lanes = 1;
+
+ mp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(mp->phy)) {
+ dev_err(dev, "get phy failed, %ld\n", PTR_ERR(mp->phy));
+ return PTR_ERR(mp->phy);
+ }
+
+ mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(mp->reset_gpio)) {
+ dev_err(dev, "get reset gpio failed\n");
+ return PTR_ERR(mp->reset_gpio);
+ }
+
+ ret = meson_pcie_get_resets(mp);
+ if (ret) {
+ dev_err(dev, "get reset resource failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_get_mems(pdev, mp);
+ if (ret) {
+ dev_err(dev, "get memory resource failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_power_on(mp);
+ if (ret) {
+ dev_err(dev, "phy power on failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_reset(mp);
+ if (ret) {
+ dev_err(dev, "reset failed, %d\n", ret);
+ goto err_phy;
+ }
+
+ ret = meson_pcie_probe_clocks(mp);
+ if (ret) {
+ dev_err(dev, "init clock resources failed, %d\n", ret);
+ goto err_phy;
+ }
+
+ platform_set_drvdata(pdev, mp);
+
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0) {
+ dev_err(dev, "Add PCIe port failed, %d\n", ret);
+ goto err_phy;
+ }
+
+ return 0;
+
+err_phy:
+ meson_pcie_power_off(mp);
+ return ret;
+}
+
+static const struct of_device_id meson_pcie_of_match[] = {
+ {
+ .compatible = "amlogic,axg-pcie",
+ },
+ {
+ .compatible = "amlogic,g12a-pcie",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, meson_pcie_of_match);
+
+static struct platform_driver meson_pcie_driver = {
+ .probe = meson_pcie_probe,
+ .driver = {
+ .name = "meson-pcie",
+ .of_match_table = meson_pcie_of_match,
+ },
+};
+
+module_platform_driver(meson_pcie_driver);
+
+MODULE_AUTHOR("Yue Wang <yue.wang@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic PCIe Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
new file mode 100644
index 000000000..b8cb77c9c
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips
+ * such as Graviton and Alpine)
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Author: Jonathan Chocron <jonnyc@amazon.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pci-acpi.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+struct al_pcie_acpi {
+ void __iomem *dbi_base;
+};
+
+static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct al_pcie_acpi *pcie = cfg->priv;
+ void __iomem *dbi_base = pcie->dbi_base;
+
+ if (bus->number == cfg->busr.start) {
+ /*
+ * The DW PCIe core doesn't filter out transactions to other
+ * devices/functions on the root bus num, so we do this here.
+ */
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+ else
+ return dbi_base + where;
+ }
+
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int al_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct al_pcie_acpi *al_pcie;
+ struct resource *res;
+ int ret;
+
+ al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+ if (!al_pcie)
+ return -ENOMEM;
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc dbi base address for SEG %d\n",
+ root->segment);
+ return ret;
+ }
+
+ dev_dbg(dev, "Root port dbi res: %pR\n", res);
+
+ al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(al_pcie->dbi_base))
+ return PTR_ERR(al_pcie->dbi_base);
+
+ cfg->priv = al_pcie;
+
+ return 0;
+}
+
+const struct pci_ecam_ops al_pcie_ops = {
+ .init = al_pcie_init,
+ .pci_ops = {
+ .map_bus = al_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
+
+#ifdef CONFIG_PCIE_AL
+
+#include <linux/of_pci.h>
+#include "pcie-designware.h"
+
+#define AL_PCIE_REV_ID_2 2
+#define AL_PCIE_REV_ID_3 3
+#define AL_PCIE_REV_ID_4 4
+
+#define AXI_BASE_OFFSET 0x0
+
+#define DEVICE_ID_OFFSET 0x16c
+
+#define DEVICE_REV_ID 0x0
+#define DEVICE_REV_ID_DEV_ID_MASK GENMASK(31, 16)
+
+#define DEVICE_REV_ID_DEV_ID_X4 0
+#define DEVICE_REV_ID_DEV_ID_X8 2
+#define DEVICE_REV_ID_DEV_ID_X16 4
+
+#define OB_CTRL_REV1_2_OFFSET 0x0040
+#define OB_CTRL_REV3_5_OFFSET 0x0030
+
+#define CFG_TARGET_BUS 0x0
+#define CFG_TARGET_BUS_MASK_MASK GENMASK(7, 0)
+#define CFG_TARGET_BUS_BUSNUM_MASK GENMASK(15, 8)
+
+#define CFG_CONTROL 0x4
+#define CFG_CONTROL_SUBBUS_MASK GENMASK(15, 8)
+#define CFG_CONTROL_SEC_BUS_MASK GENMASK(23, 16)
+
+struct al_pcie_reg_offsets {
+ unsigned int ob_ctrl;
+};
+
+struct al_pcie_target_bus_cfg {
+ u8 reg_val;
+ u8 reg_mask;
+ u8 ecam_mask;
+};
+
+struct al_pcie {
+ struct dw_pcie *pci;
+ void __iomem *controller_base; /* base of PCIe unit (not DW core) */
+ struct device *dev;
+ resource_size_t ecam_size;
+ unsigned int controller_rev_id;
+ struct al_pcie_reg_offsets reg_offsets;
+ struct al_pcie_target_bus_cfg target_bus_cfg;
+};
+
+#define to_al_pcie(x) dev_get_drvdata((x)->dev)
+
+static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset)
+{
+ return readl_relaxed(pcie->controller_base + offset);
+}
+
+static inline void al_pcie_controller_writel(struct al_pcie *pcie, u32 offset,
+ u32 val)
+{
+ writel_relaxed(val, pcie->controller_base + offset);
+}
+
+static int al_pcie_rev_id_get(struct al_pcie *pcie, unsigned int *rev_id)
+{
+ u32 dev_rev_id_val;
+ u32 dev_id_val;
+
+ dev_rev_id_val = al_pcie_controller_readl(pcie, AXI_BASE_OFFSET +
+ DEVICE_ID_OFFSET +
+ DEVICE_REV_ID);
+ dev_id_val = FIELD_GET(DEVICE_REV_ID_DEV_ID_MASK, dev_rev_id_val);
+
+ switch (dev_id_val) {
+ case DEVICE_REV_ID_DEV_ID_X4:
+ *rev_id = AL_PCIE_REV_ID_2;
+ break;
+ case DEVICE_REV_ID_DEV_ID_X8:
+ *rev_id = AL_PCIE_REV_ID_3;
+ break;
+ case DEVICE_REV_ID_DEV_ID_X16:
+ *rev_id = AL_PCIE_REV_ID_4;
+ break;
+ default:
+ dev_err(pcie->dev, "Unsupported dev_id_val (0x%x)\n",
+ dev_id_val);
+ return -EINVAL;
+ }
+
+ dev_dbg(pcie->dev, "dev_id_val: 0x%x\n", dev_id_val);
+
+ return 0;
+}
+
+static int al_pcie_reg_offsets_set(struct al_pcie *pcie)
+{
+ switch (pcie->controller_rev_id) {
+ case AL_PCIE_REV_ID_2:
+ pcie->reg_offsets.ob_ctrl = OB_CTRL_REV1_2_OFFSET;
+ break;
+ case AL_PCIE_REV_ID_3:
+ case AL_PCIE_REV_ID_4:
+ pcie->reg_offsets.ob_ctrl = OB_CTRL_REV3_5_OFFSET;
+ break;
+ default:
+ dev_err(pcie->dev, "Unsupported controller rev_id: 0x%x\n",
+ pcie->controller_rev_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
+ u8 target_bus,
+ u8 mask_target_bus)
+{
+ u32 reg;
+
+ reg = FIELD_PREP(CFG_TARGET_BUS_MASK_MASK, mask_target_bus) |
+ FIELD_PREP(CFG_TARGET_BUS_BUSNUM_MASK, target_bus);
+
+ al_pcie_controller_writel(pcie, AXI_BASE_OFFSET +
+ pcie->reg_offsets.ob_ctrl + CFG_TARGET_BUS,
+ reg);
+}
+
+static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
+ unsigned int busnr = bus->number;
+ struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
+ unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
+ unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
+
+ if (busnr_reg != target_bus_cfg->reg_val) {
+ dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n",
+ target_bus_cfg->reg_val, busnr_reg);
+ target_bus_cfg->reg_val = busnr_reg;
+ al_pcie_target_bus_set(pcie,
+ target_bus_cfg->reg_val,
+ target_bus_cfg->reg_mask);
+ }
+
+ return pp->va_cfg0_base + PCIE_ECAM_OFFSET(busnr_ecam, devfn, where);
+}
+
+static struct pci_ops al_child_pci_ops = {
+ .map_bus = al_pcie_conf_addr_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void al_pcie_config_prepare(struct al_pcie *pcie)
+{
+ struct al_pcie_target_bus_cfg *target_bus_cfg;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ unsigned int ecam_bus_mask;
+ u32 cfg_control_offset;
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ u32 cfg_control;
+ u32 reg;
+ struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+
+ target_bus_cfg = &pcie->target_bus_cfg;
+
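+ /*
+ * ECAM maps 1 MiB of config space per bus (PCIE_ECAM_BUS_SHIFT bits
+ * of offset), so size >> shift gives the number of buses the window
+ * can address.
+ */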
+ ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
+ if (ecam_bus_mask > 255) {
+ dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256\n");
+ ecam_bus_mask = 255;
+ }
+
+ /* This portion is taken from the transaction address */
+ target_bus_cfg->ecam_mask = ecam_bus_mask;
+ /* This portion is taken from the cfg_target_bus reg */
+ target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask;
+ target_bus_cfg->reg_val = bus->start & target_bus_cfg->reg_mask;
+
+ al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,
+ target_bus_cfg->reg_mask);
+
+ secondary_bus = bus->start + 1;
+ subordinate_bus = bus->end;
+
+ /* Set the valid values of secondary and subordinate buses */
+ cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl +
+ CFG_CONTROL;
+
+ cfg_control = al_pcie_controller_readl(pcie, cfg_control_offset);
+
+ reg = cfg_control &
+ ~(CFG_CONTROL_SEC_BUS_MASK | CFG_CONTROL_SUBBUS_MASK);
+
+ reg |= FIELD_PREP(CFG_CONTROL_SUBBUS_MASK, subordinate_bus) |
+ FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
+
+ al_pcie_controller_writel(pcie, cfg_control_offset, reg);
+}
+
+static int al_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct al_pcie *pcie = to_al_pcie(pci);
+ int rc;
+
+ pp->bridge->child_ops = &al_child_pci_ops;
+
+ rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);
+ if (rc)
+ return rc;
+
+ rc = al_pcie_reg_offsets_set(pcie);
+ if (rc)
+ return rc;
+
+ al_pcie_config_prepare(pcie);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops al_pcie_host_ops = {
+ .host_init = al_pcie_host_init,
+};
+
+static int al_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *controller_res;
+ struct resource *ecam_res;
+ struct al_pcie *al_pcie;
+ struct dw_pcie *pci;
+
+ al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+ if (!al_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->pp.ops = &al_pcie_host_ops;
+
+ al_pcie->pci = pci;
+ al_pcie->dev = dev;
+
+ ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (!ecam_res) {
+ dev_err(dev, "couldn't find 'config' reg in DT\n");
+ return -ENOENT;
+ }
+ al_pcie->ecam_size = resource_size(ecam_res);
+
+ controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "controller");
+ al_pcie->controller_base = devm_ioremap_resource(dev, controller_res);
+ if (IS_ERR(al_pcie->controller_base)) {
+ dev_err(dev, "couldn't remap controller base %pR\n",
+ controller_res);
+ return PTR_ERR(al_pcie->controller_base);
+ }
+
+ dev_dbg(dev, "From DT: controller_base: %pR\n", controller_res);
+
+ platform_set_drvdata(pdev, al_pcie);
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static const struct of_device_id al_pcie_of_match[] = {
+ { .compatible = "amazon,al-alpine-v2-pcie",
+ },
+ { .compatible = "amazon,al-alpine-v3-pcie",
+ },
+ {},
+};
+
+static struct platform_driver al_pcie_driver = {
+ .driver = {
+ .name = "al-pcie",
+ .of_match_table = al_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = al_pcie_probe,
+};
+builtin_platform_driver(al_pcie_driver);
+
+#endif /* CONFIG_PCIE_AL */
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
new file mode 100644
index 000000000..5c999e15c
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Marvell Armada-8K SoCs
+ *
+ * Armada-8K PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2016 Marvell Technology Group Ltd.
+ *
+ * Author: Yehuda Yitshak <yehuday@marvell.com>
+ * Author: Shadi Ammouri <shadi@marvell.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+
+#include "pcie-designware.h"
+
+#define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4
+
+struct armada8k_pcie {
+ struct dw_pcie *pci;
+ struct clk *clk;
+ struct clk *clk_reg;
+ struct phy *phy[ARMADA8K_PCIE_MAX_LANES];
+ unsigned int phy_count;
+};
+
+#define PCIE_VENDOR_REGS_OFFSET 0x8000
+
+#define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0)
+#define PCIE_APP_LTSSM_EN BIT(2)
+#define PCIE_DEVICE_TYPE_SHIFT 4
+#define PCIE_DEVICE_TYPE_MASK 0xF
+#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */
+
+#define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8)
+#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1)
+#define PCIE_GLB_STS_PHY_LINK_UP BIT(9)
+
+#define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C)
+#define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20)
+#define PCIE_INT_A_ASSERT_MASK BIT(9)
+#define PCIE_INT_B_ASSERT_MASK BIT(10)
+#define PCIE_INT_C_ASSERT_MASK BIT(11)
+#define PCIE_INT_D_ASSERT_MASK BIT(12)
+
+#define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50)
+#define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54)
+#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C)
+#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60)
+/*
+ * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
+ * allocate
+ */
+#define ARCACHE_DEFAULT_VALUE 0x3511
+#define AWCACHE_DEFAULT_VALUE 0x5311
+
+#define DOMAIN_OUTER_SHAREABLE 0x2
+#define AX_USER_DOMAIN_MASK 0x3
+#define AX_USER_DOMAIN_SHIFT 4
+
+#define to_armada8k_pcie(x) dev_get_drvdata((x)->dev)
+
+static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie)
+{
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+}
+
+static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ return ret;
+
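+ /* The number of lanes found is passed to the PHY as the PCIe submode. */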
+ ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE,
+ pcie->phy_count);
+ if (ret) {
+ phy_exit(pcie->phy[i]);
+ return ret;
+ }
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ phy_exit(pcie->phy[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i);
+ if (IS_ERR(pcie->phy[i])) {
+ if (PTR_ERR(pcie->phy[i]) != -ENODEV)
+ return PTR_ERR(pcie->phy[i]);
+
+ pcie->phy[i] = NULL;
+ continue;
+ }
+
+ pcie->phy_count++;
+ }
+
+ /* Old bindings miss the PHY handle, so just warn if there is no PHY */
+ if (!pcie->phy_count)
+ dev_warn(dev, "No available PHY\n");
+
+ ret = armada8k_pcie_enable_phys(pcie);
+ if (ret)
+ dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret);
+
+ return ret;
+}
+
+static int armada8k_pcie_link_up(struct dw_pcie *pci)
+{
+ u32 reg;
+ u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
+
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
+
+ if ((reg & mask) == mask)
+ return 1;
+
+ dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
+ return 0;
+}
+
+static int armada8k_pcie_start_link(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ /* Start LTSSM */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+ reg |= PCIE_APP_LTSSM_EN;
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+
+ return 0;
+}
+
+static int armada8k_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ u32 reg;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ if (!dw_pcie_link_up(pci)) {
+ /* Disable LTSSM state machine to enable configuration */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+ reg &= ~(PCIE_APP_LTSSM_EN);
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+ }
+
+ /* Set the device to root complex mode */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+ reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
+ reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+
+ /* Set the PCIe master AxCache attributes */
+ dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
+ dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);
+
+ /* Set the PCIe master AxDomain attributes */
+ reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG);
+ reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+ reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+ dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg);
+
+ reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG);
+ reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+ reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+ dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg);
+
+ /* Enable INT A-D interrupts */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG);
+ reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
+ PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);
+
+ return 0;
+}
+
+static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
+{
+ struct armada8k_pcie *pcie = arg;
+ struct dw_pcie *pci = pcie->pci;
+ u32 val;
+
+ /*
+ * Interrupts are directly handled by the device driver of the
+ * PCI device. However, they are also latched into the PCIe
+ * controller, so we simply discard them.
+ */
+ val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG);
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val);
+
+ return IRQ_HANDLED;
+}
+
+static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
+ .host_init = armada8k_pcie_host_init,
+};
+
+static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->ops = &armada8k_pcie_host_ops;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
+ IRQF_SHARED, "armada8k-pcie", pcie);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", pp->irq);
+ return ret;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = armada8k_pcie_link_up,
+ .start_link = armada8k_pcie_start_link,
+};
+
+static int armada8k_pcie_probe(struct platform_device *pdev)
+{
+ struct dw_pcie *pci;
+ struct armada8k_pcie *pcie;
+ struct device *dev = &pdev->dev;
+ struct resource *base;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ pcie->pci = pci;
+
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return ret;
+
+ pcie->clk_reg = devm_clk_get(dev, "reg");
+ if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
+ ret = -EPROBE_DEFER;
+ goto fail;
+ }
+ if (!IS_ERR(pcie->clk_reg)) {
+ ret = clk_prepare_enable(pcie->clk_reg);
+ if (ret)
+ goto fail_clkreg;
+ }
+
+ /* Get the dw-pcie unit configuration/control registers base. */
+ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
+ if (IS_ERR(pci->dbi_base)) {
+ ret = PTR_ERR(pci->dbi_base);
+ goto fail_clkreg;
+ }
+
+ ret = armada8k_pcie_setup_phys(pcie);
+ if (ret)
+ goto fail_clkreg;
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = armada8k_add_pcie_port(pcie, pdev);
+ if (ret)
+ goto disable_phy;
+
+ return 0;
+
+disable_phy:
+ armada8k_pcie_disable_phys(pcie);
+fail_clkreg:
+ clk_disable_unprepare(pcie->clk_reg);
+fail:
+ clk_disable_unprepare(pcie->clk);
+
+ return ret;
+}
+
+static const struct of_device_id armada8k_pcie_of_match[] = {
+ { .compatible = "marvell,armada8k-pcie", },
+ {},
+};
+
+static struct platform_driver armada8k_pcie_driver = {
+ .probe = armada8k_pcie_probe,
+ .driver = {
+ .name = "armada8k-pcie",
+ .of_match_table = armada8k_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(armada8k_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
new file mode 100644
index 000000000..9b572a2b2
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Axis ARTPEC-6 SoC
+ *
+ * Author: Niklas Cassel <niklas.cassel@axis.com>
+ *
+ * Based on work done by Phil Edworthy <phil@edworthys.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+#define to_artpec6_pcie(x) dev_get_drvdata((x)->dev)
+
+enum artpec_pcie_variants {
+ ARTPEC6,
+ ARTPEC7,
+};
+
+struct artpec6_pcie {
+ struct dw_pcie *pci;
+ struct regmap *regmap; /* DT axis,syscon-pcie */
+ void __iomem *phy_base; /* DT phy */
+ enum artpec_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
+};
+
+struct artpec_pcie_of_data {
+ enum artpec_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
+};
+
+static const struct of_device_id artpec6_pcie_of_match[];
+
+/* ARTPEC-6 specific registers */
+#define PCIECFG 0x18
+#define PCIECFG_DBG_OEN BIT(24)
+#define PCIECFG_CORE_RESET_REQ BIT(21)
+#define PCIECFG_LTSSM_ENABLE BIT(20)
+#define PCIECFG_DEVICE_TYPE_MASK GENMASK(19, 16)
+#define PCIECFG_CLKREQ_B BIT(11)
+#define PCIECFG_REFCLK_ENABLE BIT(10)
+#define PCIECFG_PLL_ENABLE BIT(9)
+#define PCIECFG_PCLK_ENABLE BIT(8)
+#define PCIECFG_RISRCREN BIT(4)
+#define PCIECFG_MODE_TX_DRV_EN BIT(3)
+#define PCIECFG_CISRREN BIT(2)
+#define PCIECFG_MACRO_ENABLE BIT(0)
+/* ARTPEC-7 specific fields */
+#define PCIECFG_REFCLKSEL BIT(23)
+#define PCIECFG_NOC_RESET BIT(3)
+
+#define PCIESTAT 0x1c
+/* ARTPEC-7 specific fields */
+#define PCIESTAT_EXTREFCLK BIT(3)
+
+#define NOCCFG 0x40
+#define NOCCFG_ENABLE_CLK_PCIE BIT(4)
+#define NOCCFG_POWER_PCIE_IDLEACK BIT(3)
+#define NOCCFG_POWER_PCIE_IDLE BIT(2)
+#define NOCCFG_POWER_PCIE_IDLEREQ BIT(1)
+
+#define PHY_STATUS 0x118
+#define PHY_COSPLLLOCK BIT(0)
+
+#define PHY_TX_ASIC_OUT 0x4040
+#define PHY_TX_ASIC_OUT_TX_ACK BIT(0)
+
+#define PHY_RX_ASIC_OUT 0x405c
+#define PHY_RX_ASIC_OUT_ACK BIT(0)
+
+static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
+{
+ u32 val;
+
+ regmap_read(artpec6_pcie->regmap, offset, &val);
+ return val;
+}
+
+static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val)
+{
+ regmap_write(artpec6_pcie->regmap, offset, val);
+}
+
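+/*
+ * Return the controller-visible address for the iATU by stripping the
+ * CPU-side window base: the config space base in RC mode, or the
+ * endpoint physical base in EP mode.
+ */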
+static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+{
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct dw_pcie_ep *ep = &pci->ep;
+
+ switch (artpec6_pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ return pci_addr - pp->cfg0_base;
+ case DW_PCIE_EP_TYPE:
+ return pci_addr - ep->phys_base;
+ default:
+ dev_err(pci->dev, "UNKNOWN device type\n");
+ }
+ return pci_addr;
+}
+
+static int artpec6_pcie_establish_link(struct dw_pcie *pci)
+{
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ u32 val;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val |= PCIECFG_LTSSM_ENABLE;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+
+ return 0;
+}
+
+static void artpec6_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ u32 val;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val &= ~PCIECFG_LTSSM_ENABLE;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
+ .start_link = artpec6_pcie_establish_link,
+ .stop_link = artpec6_pcie_stop_link,
+};
+
+static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie)
+{
+ struct dw_pcie *pci = artpec6_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ unsigned int retries;
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ retries--;
+ } while (retries &&
+ (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
+ if (!retries)
+ dev_err(dev, "PCIe clock manager did not leave idle state\n");
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ val = readl(artpec6_pcie->phy_base + PHY_STATUS);
+ retries--;
+ } while (retries && !(val & PHY_COSPLLLOCK));
+ if (!retries)
+ dev_err(dev, "PHY PLL did not lock\n");
+}
+
+static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie)
+{
+ struct dw_pcie *pci = artpec6_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ u16 phy_status_tx, phy_status_rx;
+ unsigned int retries;
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ retries--;
+ } while (retries &&
+ (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
+ if (!retries)
+ dev_err(dev, "PCIe clock manager did not leave idle state\n");
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT);
+ phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT);
+ retries--;
+ } while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) ||
+ (phy_status_rx & PHY_RX_ASIC_OUT_ACK)));
+ if (!retries)
+ dev_err(dev, "PHY did not enter Pn state\n");
+}
+
+static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie)
+{
+ switch (artpec6_pcie->variant) {
+ case ARTPEC6:
+ artpec6_pcie_wait_for_phy_a6(artpec6_pcie);
+ break;
+ case ARTPEC7:
+ artpec6_pcie_wait_for_phy_a7(artpec6_pcie);
+ break;
+ }
+}
+
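+/*
+ * ARTPEC-6 PHY bring-up: enable the terminations, the reference clock and
+ * the PHY macro, ungate the NoC PCIe clock, start PCLK and the PLL, and
+ * finally drop the NoC power-idle request.
+ */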
+static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie)
+{
+ u32 val;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
+ PCIECFG_MODE_TX_DRV_EN |
+ PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */
+ PCIECFG_MACRO_ENABLE;
+ val |= PCIECFG_REFCLK_ENABLE;
+ val &= ~PCIECFG_DBG_OEN;
+ val &= ~PCIECFG_CLKREQ_B;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+ usleep_range(5000, 6000);
+
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ val |= NOCCFG_ENABLE_CLK_PCIE;
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+ usleep_range(20, 30);
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+ usleep_range(6000, 7000);
+
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+}
+
+static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie)
+{
+ struct dw_pcie *pci = artpec6_pcie->pci;
+ u32 val;
+ bool extrefclk;
+
+ /* Check if external reference clock is connected */
+ val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT);
+ extrefclk = !!(val & PCIESTAT_EXTREFCLK);
+ dev_dbg(pci->dev, "Using reference clock: %s\n",
+ extrefclk ? "external" : "internal");
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
+ PCIECFG_PCLK_ENABLE;
+ if (extrefclk)
+ val |= PCIECFG_REFCLKSEL;
+ else
+ val &= ~PCIECFG_REFCLKSEL;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+ usleep_range(10, 20);
+
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ val |= NOCCFG_ENABLE_CLK_PCIE;
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+ usleep_range(20, 30);
+
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+ val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+}
+
+static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
+{
+ switch (artpec6_pcie->variant) {
+ case ARTPEC6:
+ artpec6_pcie_init_phy_a6(artpec6_pcie);
+ break;
+ case ARTPEC7:
+ artpec6_pcie_init_phy_a7(artpec6_pcie);
+ break;
+ }
+}
+
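+/*
+ * Note the core reset has opposite polarity on the two variants: ARTPEC-6
+ * asserts it by setting the reset request bit, while ARTPEC-7 asserts it by
+ * clearing the NoC reset bit.
+ */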
+static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
+{
+ u32 val;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ switch (artpec6_pcie->variant) {
+ case ARTPEC6:
+ val |= PCIECFG_CORE_RESET_REQ;
+ break;
+ case ARTPEC7:
+ val &= ~PCIECFG_NOC_RESET;
+ break;
+ }
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+}
+
+static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
+{
+ u32 val;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ switch (artpec6_pcie->variant) {
+ case ARTPEC6:
+ val &= ~PCIECFG_CORE_RESET_REQ;
+ break;
+ case ARTPEC7:
+ val |= PCIECFG_NOC_RESET;
+ break;
+ }
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+ usleep_range(100, 200);
+}
+
+static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+
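+ /*
+ * Use a non-default number of Fast Training Sequences (N_FTS) on
+ * ARTPEC-7; the DWC core programs these values during setup.
+ */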
+ if (artpec6_pcie->variant == ARTPEC7) {
+ pci->n_fts[0] = 180;
+ pci->n_fts[1] = 180;
+ }
+ artpec6_pcie_assert_core_reset(artpec6_pcie);
+ artpec6_pcie_init_phy(artpec6_pcie);
+ artpec6_pcie_deassert_core_reset(artpec6_pcie);
+ artpec6_pcie_wait_for_phy(artpec6_pcie);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
+ .host_init = artpec6_pcie_host_init,
+};
+
+static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ enum pci_barno bar;
+
+ artpec6_pcie_assert_core_reset(artpec6_pcie);
+ artpec6_pcie_init_phy(artpec6_pcie);
+ artpec6_pcie_deassert_core_reset(artpec6_pcie);
+ artpec6_pcie_wait_for_phy(artpec6_pcie);
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = artpec6_pcie_ep_init,
+ .raise_irq = artpec6_pcie_raise_irq,
+};
+
+static int artpec6_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct artpec6_pcie *artpec6_pcie;
+ int ret;
+ const struct artpec_pcie_of_data *data;
+ enum artpec_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
+ u32 val;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ variant = (enum artpec_pcie_variants)data->variant;
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
+ if (!artpec6_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ artpec6_pcie->pci = pci;
+ artpec6_pcie->variant = variant;
+ artpec6_pcie->mode = mode;
+
+ artpec6_pcie->phy_base =
+ devm_platform_ioremap_resource_byname(pdev, "phy");
+ if (IS_ERR(artpec6_pcie->phy_base))
+ return PTR_ERR(artpec6_pcie->phy_base);
+
+ artpec6_pcie->regmap =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "axis,syscon-pcie");
+ if (IS_ERR(artpec6_pcie->regmap))
+ return PTR_ERR(artpec6_pcie->regmap);
+
+ platform_set_drvdata(pdev, artpec6_pcie);
+
+ switch (artpec6_pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
+ return -ENODEV;
+
+ pci->pp.ops = &artpec6_pcie_host_ops;
+
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0)
+ return ret;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
+ return -ENODEV;
+
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+ val &= ~PCIECFG_DEVICE_TYPE_MASK;
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+
+ pci->ep.ops = &pcie_ep_ops;
+
+ return dw_pcie_ep_init(&pci->ep);
+ default:
+ dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
+ }
+
+ return 0;
+}
+
+static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = {
+ .variant = ARTPEC6,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = {
+ .variant = ARTPEC6,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = {
+ .variant = ARTPEC7,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = {
+ .variant = ARTPEC7,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id artpec6_pcie_of_match[] = {
+ {
+ .compatible = "axis,artpec6-pcie",
+ .data = &artpec6_pcie_rc_of_data,
+ },
+ {
+ .compatible = "axis,artpec6-pcie-ep",
+ .data = &artpec6_pcie_ep_of_data,
+ },
+ {
+ .compatible = "axis,artpec7-pcie",
+ .data = &artpec7_pcie_rc_of_data,
+ },
+ {
+ .compatible = "axis,artpec7-pcie-ep",
+ .data = &artpec7_pcie_ep_of_data,
+ },
+ {},
+};
+
+static struct platform_driver artpec6_pcie_driver = {
+ .probe = artpec6_pcie_probe,
+ .driver = {
+ .name = "artpec6-pcie",
+ .of_match_table = artpec6_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(artpec6_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
new file mode 100644
index 000000000..17e696797
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-bt1.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Vadim Vlasov <Vadim.Vlasov@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 PCIe controller driver
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* Baikal-T1 System CCU control registers */
+#define BT1_CCU_PCIE_CLKC 0x140
+#define BT1_CCU_PCIE_REQ_PCS_CLK BIT(16)
+#define BT1_CCU_PCIE_REQ_MAC_CLK BIT(17)
+#define BT1_CCU_PCIE_REQ_PIPE_CLK BIT(18)
+
+#define BT1_CCU_PCIE_RSTC 0x144
+#define BT1_CCU_PCIE_REQ_LINK_RST BIT(13)
+#define BT1_CCU_PCIE_REQ_SMLH_RST BIT(14)
+#define BT1_CCU_PCIE_REQ_PHY_RST BIT(16)
+#define BT1_CCU_PCIE_REQ_CORE_RST BIT(24)
+#define BT1_CCU_PCIE_REQ_STICKY_RST BIT(26)
+#define BT1_CCU_PCIE_REQ_NSTICKY_RST BIT(27)
+
+#define BT1_CCU_PCIE_PMSC 0x148
+#define BT1_CCU_PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
+#define BT1_CCU_PCIE_LTSSM_DET_QUIET 0x00
+#define BT1_CCU_PCIE_LTSSM_DET_ACT 0x01
+#define BT1_CCU_PCIE_LTSSM_POLL_ACT 0x02
+#define BT1_CCU_PCIE_LTSSM_POLL_COMP 0x03
+#define BT1_CCU_PCIE_LTSSM_POLL_CONF 0x04
+#define BT1_CCU_PCIE_LTSSM_PRE_DET_QUIET 0x05
+#define BT1_CCU_PCIE_LTSSM_DET_WAIT 0x06
+#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_START 0x07
+#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_ACEPT 0x08
+#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_WAIT 0x09
+#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_ACEPT 0x0a
+#define BT1_CCU_PCIE_LTSSM_CFG_COMPLETE 0x0b
+#define BT1_CCU_PCIE_LTSSM_CFG_IDLE 0x0c
+#define BT1_CCU_PCIE_LTSSM_RCVR_LOCK 0x0d
+#define BT1_CCU_PCIE_LTSSM_RCVR_SPEED 0x0e
+#define BT1_CCU_PCIE_LTSSM_RCVR_RCVRCFG 0x0f
+#define BT1_CCU_PCIE_LTSSM_RCVR_IDLE 0x10
+#define BT1_CCU_PCIE_LTSSM_L0 0x11
+#define BT1_CCU_PCIE_LTSSM_L0S 0x12
+#define BT1_CCU_PCIE_LTSSM_L123_SEND_IDLE 0x13
+#define BT1_CCU_PCIE_LTSSM_L1_IDLE 0x14
+#define BT1_CCU_PCIE_LTSSM_L2_IDLE 0x15
+#define BT1_CCU_PCIE_LTSSM_L2_WAKE 0x16
+#define BT1_CCU_PCIE_LTSSM_DIS_ENTRY 0x17
+#define BT1_CCU_PCIE_LTSSM_DIS_IDLE 0x18
+#define BT1_CCU_PCIE_LTSSM_DISABLE 0x19
+#define BT1_CCU_PCIE_LTSSM_LPBK_ENTRY 0x1a
+#define BT1_CCU_PCIE_LTSSM_LPBK_ACTIVE 0x1b
+#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT 0x1c
+#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT_TOUT 0x1d
+#define BT1_CCU_PCIE_LTSSM_HOT_RST_ENTRY 0x1e
+#define BT1_CCU_PCIE_LTSSM_HOT_RST 0x1f
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ0 0x20
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ1 0x21
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ2 0x22
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ3 0x23
+#define BT1_CCU_PCIE_SMLH_LINKUP BIT(6)
+#define BT1_CCU_PCIE_RDLH_LINKUP BIT(7)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L0S BIT(8)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L1 BIT(9)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L2 BIT(10)
+#define BT1_CCU_PCIE_L1_PENDING BIT(12)
+#define BT1_CCU_PCIE_REQ_EXIT_L1 BIT(14)
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ BIT(15)
+#define BT1_CCU_PCIE_PM_DSTAT_MASK GENMASK(18, 16)
+#define BT1_CCU_PCIE_PM_PME_EN BIT(20)
+#define BT1_CCU_PCIE_PM_PME_STATUS BIT(21)
+#define BT1_CCU_PCIE_AUX_PM_EN BIT(22)
+#define BT1_CCU_PCIE_AUX_PWR_DET BIT(23)
+#define BT1_CCU_PCIE_WAKE_DET BIT(24)
+#define BT1_CCU_PCIE_TURNOFF_REQ BIT(30)
+#define BT1_CCU_PCIE_TURNOFF_ACK BIT(31)
+
+#define BT1_CCU_PCIE_GENC 0x14c
+#define BT1_CCU_PCIE_LTSSM_EN BIT(1)
+#define BT1_CCU_PCIE_DBI2_MODE BIT(2)
+#define BT1_CCU_PCIE_MGMT_EN BIT(3)
+#define BT1_CCU_PCIE_RXLANE_FLIP_EN BIT(16)
+#define BT1_CCU_PCIE_TXLANE_FLIP_EN BIT(17)
+#define BT1_CCU_PCIE_SLV_XFER_PEND BIT(24)
+#define BT1_CCU_PCIE_RCV_XFER_PEND BIT(25)
+#define BT1_CCU_PCIE_DBI_XFER_PEND BIT(26)
+#define BT1_CCU_PCIE_DMA_XFER_PEND BIT(27)
+
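+/*
+ * The link is up while the LTSSM is in any state from L0 up to and including
+ * L2 wake, i.e. in the active or power-saving link states.
+ */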
+#define BT1_CCU_PCIE_LTSSM_LINKUP(_pmsc) \
+({ \
+ int __state = FIELD_GET(BT1_CCU_PCIE_LTSSM_STATE_MASK, _pmsc); \
+ __state >= BT1_CCU_PCIE_LTSSM_L0 && __state <= BT1_CCU_PCIE_LTSSM_L2_WAKE; \
+})
+
+/* Baikal-T1 PCIe specific control registers */
+#define BT1_PCIE_AXI2MGM_LANENUM 0xd04
+#define BT1_PCIE_AXI2MGM_LANESEL_MASK GENMASK(3, 0)
+
+#define BT1_PCIE_AXI2MGM_ADDRCTL 0xd08
+#define BT1_PCIE_AXI2MGM_PHYREG_ADDR_MASK GENMASK(20, 0)
+#define BT1_PCIE_AXI2MGM_READ_FLAG BIT(29)
+#define BT1_PCIE_AXI2MGM_DONE BIT(30)
+#define BT1_PCIE_AXI2MGM_BUSY BIT(31)
+
+#define BT1_PCIE_AXI2MGM_WRITEDATA 0xd0c
+#define BT1_PCIE_AXI2MGM_WDATA GENMASK(15, 0)
+
+#define BT1_PCIE_AXI2MGM_READDATA 0xd10
+#define BT1_PCIE_AXI2MGM_RDATA GENMASK(15, 0)
+
+/* Generic Baikal-T1 PCIe interface resources */
+#define BT1_PCIE_NUM_APP_CLKS ARRAY_SIZE(bt1_pcie_app_clks)
+#define BT1_PCIE_NUM_CORE_CLKS ARRAY_SIZE(bt1_pcie_core_clks)
+#define BT1_PCIE_NUM_APP_RSTS ARRAY_SIZE(bt1_pcie_app_rsts)
+#define BT1_PCIE_NUM_CORE_RSTS ARRAY_SIZE(bt1_pcie_core_rsts)
+
+/* PCIe bus setup delays and timeouts */
+#define BT1_PCIE_RST_DELAY_MS 100
+#define BT1_PCIE_RUN_DELAY_US 100
+#define BT1_PCIE_REQ_DELAY_US 1
+#define BT1_PCIE_REQ_TIMEOUT_US 1000
+#define BT1_PCIE_LNK_DELAY_US 1000
+#define BT1_PCIE_LNK_TIMEOUT_US 1000000
+
+static const enum dw_pcie_app_clk bt1_pcie_app_clks[] = {
+ DW_PCIE_DBI_CLK, DW_PCIE_MSTR_CLK, DW_PCIE_SLV_CLK,
+};
+
+static const enum dw_pcie_core_clk bt1_pcie_core_clks[] = {
+ DW_PCIE_REF_CLK,
+};
+
+static const enum dw_pcie_app_rst bt1_pcie_app_rsts[] = {
+ DW_PCIE_MSTR_RST, DW_PCIE_SLV_RST,
+};
+
+static const enum dw_pcie_core_rst bt1_pcie_core_rsts[] = {
+ DW_PCIE_NON_STICKY_RST, DW_PCIE_STICKY_RST, DW_PCIE_CORE_RST,
+ DW_PCIE_PIPE_RST, DW_PCIE_PHY_RST, DW_PCIE_HOT_RST, DW_PCIE_PWR_RST,
+};
+
+struct bt1_pcie {
+ struct dw_pcie dw;
+ struct platform_device *pdev;
+ struct regmap *sys_regs;
+};
+#define to_bt1_pcie(_dw) container_of(_dw, struct bt1_pcie, dw)
+
+/*
+ * Baikal-T1 MMIO space must be accessed with dword-aligned instructions
+ * only. Note the methods below are optimized so that dword operations, being
+ * the most frequent case, are performed with minimum overhead.
+ */
+static int bt1_pcie_read_mmio(void __iomem *addr, int size, u32 *val)
+{
+ unsigned int ofs = (uintptr_t)addr & 0x3;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return -EINVAL;
+
+ *val = readl(addr - ofs) >> ofs * BITS_PER_BYTE;
+ if (size == 4) {
+ return 0;
+ } else if (size == 2) {
+ *val &= 0xffff;
+ return 0;
+ } else if (size == 1) {
+ *val &= 0xff;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int bt1_pcie_write_mmio(void __iomem *addr, int size, u32 val)
+{
+ unsigned int ofs = (uintptr_t)addr & 0x3;
+ u32 tmp, mask;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return -EINVAL;
+
+ if (size == 4) {
+ writel(val, addr);
+ return 0;
+ } else if (size == 2 || size == 1) {
+ mask = GENMASK(size * BITS_PER_BYTE - 1, 0);
+ tmp = readl(addr - ofs) & ~(mask << ofs * BITS_PER_BYTE);
+ tmp |= (val & mask) << ofs * BITS_PER_BYTE;
+ writel(tmp, addr - ofs);
+ return 0;
+ }
+
+ return -EINVAL;
+}
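+
+/*
+ * For example (illustrative only): a 2-byte read at an address with offset
+ * 0x2 within its dword is served by a dword read at the aligned address
+ * followed by a 16-bit shift and a 0xffff mask, and a 1-byte write at offset
+ * 0x1 by a dword read-modify-write replacing bits 15:8 only.
+ */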
+
+static u32 bt1_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size)
+{
+ int ret;
+ u32 val;
+
+ ret = bt1_pcie_read_mmio(base + reg, size, &val);
+ if (ret) {
+ dev_err(pci->dev, "Read DBI address failed\n");
+ return ~0U;
+ }
+
+ return val;
+}
+
+static void bt1_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ int ret;
+
+ ret = bt1_pcie_write_mmio(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI address failed\n");
+}
+
+static void bt1_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ int ret;
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE, BT1_CCU_PCIE_DBI2_MODE);
+
+ ret = bt1_pcie_write_mmio(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI2 address failed\n");
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE, 0);
+}
+
+static int bt1_pcie_start_link(struct dw_pcie *pci)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ u32 val;
+ int ret;
+
+ /*
+ * Enable the LTSSM and make sure it manages to establish both the PHY
+ * and the data links. This procedure is sufficient to reach the
+ * 2.5 GT/s speed.
+ */
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, BT1_CCU_PCIE_LTSSM_EN);
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ (val & BT1_CCU_PCIE_SMLH_LINKUP),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to set PHY link up\n");
+ return ret;
+ }
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ (val & BT1_CCU_PCIE_RDLH_LINKUP),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to set data link up\n");
+ return ret;
+ }
+
+ /*
+ * Request a direct speed change once the link is established in an
+ * attempt to reach a higher bus performance (up to Gen.3, 8.0 GT/s);
+ * at the very least this is required to reach the 8.0 GT/s speed.
+ */
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ BT1_CCU_PCIE_LTSSM_LINKUP(val),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret)
+ dev_err(pci->dev, "LTSSM failed to get into L0 state\n");
+
+ return ret;
+}
+
+static void bt1_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, 0);
+}
+
+static const struct dw_pcie_ops bt1_pcie_ops = {
+ .read_dbi = bt1_pcie_read_dbi,
+ .write_dbi = bt1_pcie_write_dbi,
+ .write_dbi2 = bt1_pcie_write_dbi2,
+ .start_link = bt1_pcie_start_link,
+ .stop_link = bt1_pcie_stop_link,
+};
+
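+/*
+ * The host's own config space is reached through the same dword-aligned-only
+ * MMIO, hence the generic 32-bit config accessors.
+ */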
+static struct pci_ops bt1_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static int bt1_pcie_get_resources(struct bt1_pcie *btpci)
+{
+ struct device *dev = btpci->dw.dev;
+ int i;
+
+ /* DBI accesses are supposed to be performed with dword-aligned IOs */
+ btpci->dw.pp.bridge->ops = &bt1_pci_ops;
+
+ /* These CSRs are in MMIO so we won't check the regmap-methods status */
+ btpci->sys_regs =
+ syscon_regmap_lookup_by_phandle(dev->of_node, "baikal,bt1-syscon");
+ if (IS_ERR(btpci->sys_regs))
+ return dev_err_probe(dev, PTR_ERR(btpci->sys_regs),
+ "Failed to get syscon\n");
+
+ /* Make sure all the required resources have been specified */
+ for (i = 0; i < BT1_PCIE_NUM_APP_CLKS; i++) {
+ if (!btpci->dw.app_clks[bt1_pcie_app_clks[i]].clk) {
+ dev_err(dev, "App clocks set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_CORE_CLKS; i++) {
+ if (!btpci->dw.core_clks[bt1_pcie_core_clks[i]].clk) {
+ dev_err(dev, "Core clocks set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_APP_RSTS; i++) {
+ if (!btpci->dw.app_rsts[bt1_pcie_app_rsts[i]].rstc) {
+ dev_err(dev, "App resets set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_CORE_RSTS; i++) {
+ if (!btpci->dw.core_rsts[bt1_pcie_core_rsts[i]].rstc) {
+ dev_err(dev, "Core resets set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
+static void bt1_pcie_full_stop_bus(struct bt1_pcie *btpci, bool init)
+{
+ struct device *dev = btpci->dw.dev;
+ struct dw_pcie *pci = &btpci->dw;
+ int ret;
+
+ /* Disable LTSSM for sure */
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, 0);
+
+ /*
+ * Application reset controls are trigger-based so assert the core
+ * resets only.
+ */
+ ret = reset_control_bulk_assert(DW_PCIE_NUM_CORE_RSTS, pci->core_rsts);
+ if (ret)
+ dev_err(dev, "Failed to assert core resets\n");
+
+ /*
+ * On the init stage the clocks are expected to be disabled already,
+ * at least judging by the clk enable counter, so only disable them on
+ * the subsequent stops.
+ */
+ if (!init) {
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ }
+
+ /* The peripheral devices are unavailable anyway so reset them too */
+ gpiod_set_value_cansleep(pci->pe_rst, 1);
+
+ /* Make sure all the resets are settled */
+ msleep(BT1_PCIE_RST_DELAY_MS);
+}
+
+/*
+ * Implements the cold reset procedure in accordance with the reference manual
+ * and available PM signals.
+ */
+static int bt1_pcie_cold_start_bus(struct bt1_pcie *btpci)
+{
+ struct device *dev = btpci->dw.dev;
+ struct dw_pcie *pci = &btpci->dw;
+ u32 val;
+ int ret;
+
+ /* First get out of the Power/Hot reset state */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PHY reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert hot reset\n");
+ goto err_assert_pwr_rst;
+ }
+
+ /* Wait for the PM-core to stop requesting the PHY reset */
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+ !(val & BT1_CCU_PCIE_REQ_PHY_RST),
+ BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Timed out waiting for PM to stop PHY resetting\n");
+ goto err_assert_hot_rst;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PHY reset\n");
+ goto err_assert_hot_rst;
+ }
+
+ /* Clocks can now be enabled, but the ref one is crucial at this stage */
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ if (ret) {
+ dev_err(dev, "Failed to enable app clocks\n");
+ goto err_assert_phy_rst;
+ }
+
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+ if (ret) {
+ dev_err(dev, "Failed to enable ref clocks\n");
+ goto err_disable_app_clk;
+ }
+
+ /* Wait for the PM to stop requesting the controller core reset */
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+ !(val & BT1_CCU_PCIE_REQ_CORE_RST),
+ BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Timed out waiting for PM to stop core resetting\n");
+ goto err_disable_core_clk;
+ }
+
+ /* PCS-PIPE interface and controller core can now be activated */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PIPE reset\n");
+ goto err_disable_core_clk;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core reset\n");
+ goto err_assert_pipe_rst;
+ }
+
+ /* It's recommended to reset the core and application logic together */
+ ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
+ if (ret) {
+ dev_err(dev, "Failed to reset app domain\n");
+ goto err_assert_core_rst;
+ }
+
+ /* Sticky/Non-sticky CSR flags can now be unreset too */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert sticky reset\n");
+ goto err_assert_core_rst;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_NON_STICKY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert non-sticky reset\n");
+ goto err_assert_sticky_rst;
+ }
+
+ /* Activate the PCIe bus peripheral devices */
+ gpiod_set_value_cansleep(pci->pe_rst, 0);
+
+ /* Make sure the state is settled (LTSSM is still disabled though) */
+ usleep_range(BT1_PCIE_RUN_DELAY_US, BT1_PCIE_RUN_DELAY_US + 100);
+
+ return 0;
+
+err_assert_sticky_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+
+err_assert_core_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+
+err_assert_pipe_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+
+err_disable_core_clk:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+err_disable_app_clk:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+
+err_assert_phy_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+
+err_assert_hot_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+
+err_assert_pwr_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+
+ return ret;
+}
+
+static int bt1_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ int ret;
+
+ ret = bt1_pcie_get_resources(btpci);
+ if (ret)
+ return ret;
+
+ bt1_pcie_full_stop_bus(btpci, true);
+
+ return bt1_pcie_cold_start_bus(btpci);
+}
+
+static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+ bt1_pcie_full_stop_bus(btpci, false);
+}
+
+static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
+ .host_init = bt1_pcie_host_init,
+ .host_deinit = bt1_pcie_host_deinit,
+};
+
+static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci;
+
+ btpci = devm_kzalloc(&pdev->dev, sizeof(*btpci), GFP_KERNEL);
+ if (!btpci)
+ return ERR_PTR(-ENOMEM);
+
+ btpci->pdev = pdev;
+
+ platform_set_drvdata(pdev, btpci);
+
+ return btpci;
+}
+
+static int bt1_pcie_add_port(struct bt1_pcie *btpci)
+{
+ struct device *dev = &btpci->pdev->dev;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ btpci->dw.version = DW_PCIE_VER_460A;
+ btpci->dw.dev = dev;
+ btpci->dw.ops = &bt1_pcie_ops;
+
+ btpci->dw.pp.num_vectors = MAX_MSI_IRQS;
+ btpci->dw.pp.ops = &bt1_pcie_host_ops;
+
+ dw_pcie_cap_set(&btpci->dw, REQ_RES);
+
+ ret = dw_pcie_host_init(&btpci->dw.pp);
+
+ return dev_err_probe(dev, ret, "Failed to initialize DWC PCIe host\n");
+}
+
+static void bt1_pcie_del_port(struct bt1_pcie *btpci)
+{
+ dw_pcie_host_deinit(&btpci->dw.pp);
+}
+
+static int bt1_pcie_probe(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci;
+
+ btpci = bt1_pcie_create_data(pdev);
+ if (IS_ERR(btpci))
+ return PTR_ERR(btpci);
+
+ return bt1_pcie_add_port(btpci);
+}
+
+static void bt1_pcie_remove(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci = platform_get_drvdata(pdev);
+
+ bt1_pcie_del_port(btpci);
+}
+
+static const struct of_device_id bt1_pcie_of_match[] = {
+ { .compatible = "baikal,bt1-pcie" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);
+
+static struct platform_driver bt1_pcie_driver = {
+ .probe = bt1_pcie_probe,
+ .remove_new = bt1_pcie_remove,
+ .driver = {
+ .name = "bt1-pcie",
+ .of_match_table = bt1_pcie_of_match,
+ },
+};
+module_platform_driver(bt1_pcie_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 PCIe driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
new file mode 100644
index 000000000..8d79dd0e1
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe Endpoint controller driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_init_notify(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
+
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ struct dw_pcie_ep_func *ep_func;
+
+ list_for_each_entry(ep_func, &ep->func_list, list) {
+ if (ep_func->func_no == func_no)
+ return ep_func;
+ }
+
+ return NULL;
+}
+
+static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no)
+{
+ unsigned int func_offset = 0;
+
+ if (ep->ops->func_conf_select)
+ func_offset = ep->ops->func_conf_select(ep, func_no);
+
+ return func_offset;
+}
+
+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
+ enum pci_barno bar, int flags)
+{
+ u32 reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep *ep = &pci->ep;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi2(pci, reg, 0x0);
+ dw_pcie_writel_dbi(pci, reg, 0x0);
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
+ dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+ }
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+ u8 func_no, funcs;
+
+ funcs = pci->ep.epc->max_functions;
+
+ for (func_no = 0; func_no < funcs; func_no++)
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
+
+static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
+ u8 cap_ptr, u8 cap)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
+ if (!cap_ptr)
+ return 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
+ cap_id = (reg & 0x00ff);
+
+ if (cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
+ next_cap_ptr = (reg & 0xff00) >> 8;
+ return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+}
+
+static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+ u8 next_cap_ptr;
+ u16 reg;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+ return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+}
+
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_header *hdr)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,
+ hdr->subclass_code | hdr->baseclass_code << 8);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,
+ hdr->cache_line_size);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,
+ hdr->subsys_vendor_id);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,
+ hdr->interrupt_pin);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
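+/*
+ * Back a BAR with a free inbound iATU window (or reuse the window already
+ * assigned to this BAR) so that inbound accesses hit the given CPU address.
+ */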
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ dma_addr_t cpu_addr, enum pci_barno bar)
+{
+ int ret;
+ u32 free_win;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ if (!ep->bar_to_atu[bar])
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ else
+ free_win = ep->bar_to_atu[bar];
+
+ if (free_win >= pci->num_ib_windows) {
+ dev_err(pci->dev, "No free inbound window\n");
+ return -EINVAL;
+ }
+
+ ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
+ cpu_addr, bar);
+ if (ret < 0) {
+ dev_err(pci->dev, "Failed to program IB window\n");
+ return ret;
+ }
+
+ ep->bar_to_atu[bar] = free_win;
+ set_bit(free_win, ep->ib_window_map);
+
+ return 0;
+}
+
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
+ phys_addr_t phys_addr,
+ u64 pci_addr, size_t size)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 free_win;
+ int ret;
+
+ free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
+ if (free_win >= pci->num_ob_windows) {
+ dev_err(pci->dev, "No free outbound window\n");
+ return -EINVAL;
+ }
+
+ ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
+ if (ret)
+ return ret;
+
+ set_bit(free_win, ep->ob_window_map);
+ ep->outbound_addr[free_win] = phys_addr;
+
+ return 0;
+}
+
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ u32 atu_index = ep->bar_to_atu[bar];
+
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
+
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
+ clear_bit(atu_index, ep->ib_window_map);
+ ep->epf_bar[bar] = NULL;
+ ep->bar_to_atu[bar] = 0;
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ unsigned int func_offset = 0;
+ int ret, type;
+ u32 reg;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
+
+ if (!(flags & PCI_BASE_ADDRESS_SPACE))
+ type = PCIE_ATU_TYPE_MEM;
+ else
+ type = PCIE_ATU_TYPE_IO;
+
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
+ if (ret)
+ return ret;
+
+ if (ep->epf_bar[bar])
+ return 0;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
+ dw_pcie_writel_dbi(pci, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
+ dw_pcie_writel_dbi(pci, reg + 4, 0);
+ }
+
+ ep->epf_bar[bar] = epf_bar;
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+ u32 *atu_index)
+{
+ u32 index;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ for (index = 0; index < pci->num_ob_windows; index++) {
+ if (ep->outbound_addr[index] != addr)
+ continue;
+ *atu_index = index;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr)
+{
+ int ret;
+ u32 atu_index;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ ret = dw_pcie_find_index(ep, addr, &atu_index);
+ if (ret < 0)
+ return;
+
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
+ clear_bit(atu_index, ep->ob_window_map);
+}
+
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size)
+{
+ int ret;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
+ if (ret) {
+ dev_err(pci->dev, "Failed to enable address\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ if (!(val & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+
+ return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ val &= ~PCI_MSI_FLAGS_QMASK;
+ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ if (!(val & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val &= PCI_MSIX_FLAGS_QSIZE;
+
+ return val;
+}
+
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ val &= ~PCI_MSIX_FLAGS_QSIZE;
+ val |= interrupts;
+ dw_pcie_writew_dbi(pci, reg, val);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
+ val = offset | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;
+ val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->raise_irq)
+ return -EINVAL;
+
+ return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
+}
+
+static void dw_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_stop_link(pci);
+}
+
+static int dw_pcie_ep_start(struct pci_epc *epc)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ return dw_pcie_start_link(pci);
+}
+
+static const struct pci_epc_features*
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->get_features)
+ return NULL;
+
+ return ep->ops->get_features(ep);
+}
+
+static const struct pci_epc_ops epc_ops = {
+ .write_header = dw_pcie_ep_write_header,
+ .set_bar = dw_pcie_ep_set_bar,
+ .clear_bar = dw_pcie_ep_clear_bar,
+ .map_addr = dw_pcie_ep_map_addr,
+ .unmap_addr = dw_pcie_ep_unmap_addr,
+ .set_msi = dw_pcie_ep_set_msi,
+ .get_msi = dw_pcie_ep_get_msi,
+ .set_msix = dw_pcie_ep_set_msix,
+ .get_msix = dw_pcie_ep_get_msix,
+ .raise_irq = dw_pcie_ep_raise_irq,
+ .start = dw_pcie_ep_start,
+ .stop = dw_pcie_ep_stop,
+ .get_features = dw_pcie_ep_get_features,
+};
+
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ dev_err(dev, "EP cannot trigger legacy IRQs\n");
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq);
+
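+/*
+ * Raise an MSI from the endpoint side: read back the message address/data
+ * the host programmed into this function's MSI capability, map a page of
+ * outbound space at that address, write the message data and unmap again.
+ */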
+int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u8 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ struct pci_epc *epc = ep->epc;
+ unsigned int aligned_offset;
+ unsigned int func_offset = 0;
+ u16 msg_ctrl, msg_data;
+ u32 msg_addr_lower, msg_addr_upper, reg;
+ u64 msg_addr;
+ bool has_upper;
+ int ret;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
+ msg_ctrl = dw_pcie_readw_dbi(pci, reg);
+ has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;
+ msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
+ if (has_upper) {
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;
+ msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;
+ msg_data = dw_pcie_readw_dbi(pci, reg);
+ } else {
+ msg_addr_upper = 0;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;
+ msg_data = dw_pcie_readw_dbi(pci, reg);
+ }
+ aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
+ msg_addr = ((u64)msg_addr_upper) << 32 |
+ (msg_addr_lower & ~aligned_offset);
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ epc->mem->window.page_size);
+ if (ret)
+ return ret;
+
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
+
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
+
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ u32 msg_data;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
+ (interrupt_num - 1);
+
+ dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
+
+ return 0;
+}
+
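+/*
+ * Raise an MSI-X vector by looking up its address/data in the MSI-X table
+ * (resident in one of this function's BARs) and, unless the entry is masked,
+ * writing the message data through a temporarily mapped outbound window.
+ */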
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct pci_epc *epc = ep->epc;
+ unsigned int func_offset = 0;
+ u32 reg, msg_data, vec_ctrl;
+ unsigned int aligned_offset;
+ u32 tbl_offset;
+ u64 msg_addr;
+ int ret;
+ u8 bir;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
+ tbl_offset = dw_pcie_readl_dbi(pci, reg);
+ bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+
+ msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
+ msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+ msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+ vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;
+
+ if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
+ dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
+ return -EPERM;
+ }
+
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+ msg_addr &= ~aligned_offset;
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ epc->mem->window.page_size);
+ if (ret)
+ return ret;
+
+ writel(msg_data, ep->msi_mem + aligned_offset);
+
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+ return 0;
+}
+
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+
+ dw_pcie_edma_remove(pci);
+
+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ epc->mem->window.page_size);
+
+ pci_epc_mem_exit(epc);
+}
+
+static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+{
+ u32 header;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ while (pos) {
+ header = dw_pcie_readl_dbi(pci, pos);
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (!pos)
+ break;
+ }
+
+ return 0;
+}
+
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int offset, ptm_cap_base;
+ unsigned int nbars;
+ u8 hdr_type;
+ u32 reg;
+ int i;
+
+ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
+ if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+ dev_err(pci->dev,
+ "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+ hdr_type);
+ return -EIO;
+ }
+
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ }
+
+ /*
+ * PTM responder capability can be disabled only after disabling
+ * PTM root capability.
+ */
+ if (ptm_cap_base) {
+ dw_pcie_dbi_ro_wr_en(pci);
+ reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
+ reg &= ~PCI_PTM_CAP_ROOT;
+ dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
+
+ reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
+ reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
+ dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
+ dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ void *addr;
+ u8 func_no;
+ struct resource *res;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *np = dev->of_node;
+ const struct pci_epc_features *epc_features;
+ struct dw_pcie_ep_func *ep_func;
+
+ INIT_LIST_HEAD(&ep->func_list);
+
+ ret = dw_pcie_get_resources(pci);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ return -ENOMEM;
+
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ return -ENOMEM;
+
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ ep->outbound_addr = addr;
+
+ epc = devm_pci_epc_create(dev, &epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "Failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ return -ENOMEM;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+
+ ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+ ep->page_size);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize address space\n");
+ return ret;
+ }
+
+ ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
+ epc->mem->window.page_size);
+ if (!ep->msi_mem) {
+ ret = -ENOMEM;
+ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
+ goto err_exit_epc_mem;
+ }
+
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ goto err_free_epc_mem;
+
+ if (ep->ops->get_features) {
+ epc_features = ep->ops->get_features(ep);
+ if (epc_features->core_init_notifier)
+ return 0;
+ }
+
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret)
+ goto err_remove_edma;
+
+ return 0;
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+err_free_epc_mem:
+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ epc->mem->window.page_size);
+
+err_exit_epc_mem:
+ pci_epc_mem_exit(epc);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
new file mode 100644
index 000000000..a7170fd0e
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+static struct pci_ops dw_pcie_ops;
+static struct pci_ops dw_child_pcie_ops;
+
+static void dw_msi_ack_irq(struct irq_data *d)
+{
+ irq_chip_ack_parent(d);
+}
+
+static void dw_msi_mask_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void dw_msi_unmask_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
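+/*
+ * Top-level MSI IRQ chip: mask/unmask/ack are chained down to the parent
+ * (controller-level) chip, which drives the controller's MSI mask and status
+ * registers.
+ */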
+static struct irq_chip dw_pcie_msi_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_ack = dw_msi_ack_irq,
+ .irq_mask = dw_msi_mask_irq,
+ .irq_unmask = dw_msi_unmask_irq,
+};
+
+static struct msi_domain_info dw_pcie_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &dw_pcie_msi_irq_chip,
+};
+
+/* MSI int handler */
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
+{
+ int i, pos;
+ unsigned long val;
+ u32 status, num_ctrls;
+ irqreturn_t ret = IRQ_NONE;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ for (i = 0; i < num_ctrls; i++) {
+ status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE));
+ if (!status)
+ continue;
+
+ ret = IRQ_HANDLED;
+ val = status;
+ pos = 0;
+ while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
+ pos)) != MAX_MSI_IRQS_PER_CTRL) {
+ generic_handle_domain_irq(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
+ pos++;
+ }
+ }
+
+ return ret;
+}
+
+/* Chained MSI interrupt service routine */
+static void dw_chained_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dw_pcie_rp *pp;
+
+ chained_irq_enter(chip, desc);
+
+ pp = irq_desc_get_handler_data(desc);
+ dw_handle_msi_irq(pp);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u64 msi_target;
+
+ msi_target = (u64)pp->msi_data;
+
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
+
+ msg->data = d->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)d->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int dw_pci_msi_set_affinity(struct irq_data *d,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void dw_pci_bottom_mask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ pp->irq_mask[ctrl] |= BIT(bit);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dw_pci_bottom_unmask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ pp->irq_mask[ctrl] &= ~BIT(bit);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dw_pci_bottom_ack(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
+}
+
+static struct irq_chip dw_pci_msi_bottom_irq_chip = {
+ .name = "DWPCI-MSI",
+ .irq_ack = dw_pci_bottom_ack,
+ .irq_compose_msi_msg = dw_pci_setup_msi_msg,
+ .irq_set_affinity = dw_pci_msi_set_affinity,
+ .irq_mask = dw_pci_bottom_mask,
+ .irq_unmask = dw_pci_bottom_unmask,
+};
+
+static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct dw_pcie_rp *pp = domain->host_data;
+ unsigned long flags;
+ u32 i;
+ int bit;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
+ order_base_2(nr_irqs));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+
+ if (bit < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, bit + i,
+ pp->msi_irq_chip,
+ pp, handle_edge_irq,
+ NULL, NULL);
+
+ return 0;
+}
+
+static void dw_pcie_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct dw_pcie_rp *pp = domain->host_data;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
+ order_base_2(nr_irqs));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
+ .alloc = dw_pcie_irq_domain_alloc,
+ .free = dw_pcie_irq_domain_free,
+};
+
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
+
+ pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
+ &dw_pcie_msi_domain_ops, pp);
+ if (!pp->irq_domain) {
+ dev_err(pci->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
+
+ pp->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &dw_pcie_msi_domain_info,
+ pp->irq_domain);
+ if (!pp->msi_domain) {
+ dev_err(pci->dev, "Failed to create MSI domain\n");
+ irq_domain_remove(pp->irq_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+{
+ u32 ctrl;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ NULL, NULL);
+ }
+
+ irq_domain_remove(pp->msi_domain);
+ irq_domain_remove(pp->irq_domain);
+}
+
+static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u64 msi_target = (u64)pp->msi_data;
+
+ if (!pci_msi_enabled() || !pp->has_msi_ctrl)
+ return;
+
+ /* Program the msi_data */
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
+}
+
+static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 ctrl, max_vectors;
+ int irq;
+
+ /* Parse any "msiX" IRQs described in the devicetree */
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ char msi_name[] = "msiX";
+
+ msi_name[3] = '0' + ctrl;
+ irq = platform_get_irq_byname_optional(pdev, msi_name);
+ if (irq == -ENXIO)
+ break;
+ if (irq < 0)
+ return dev_err_probe(dev, irq,
+ "Failed to parse MSI IRQ '%s'\n",
+ msi_name);
+
+ pp->msi_irq[ctrl] = irq;
+ }
+
+ /* If no "msiX" IRQs, caller should fall back to the "msi" IRQ */
+ if (ctrl == 0)
+ return -ENXIO;
+
+ max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
+ if (pp->num_vectors > max_vectors) {
+ dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
+ max_vectors);
+ pp->num_vectors = max_vectors;
+ }
+ if (!pp->num_vectors)
+ pp->num_vectors = max_vectors;
+
+ return 0;
+}
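+
+/*
+ * Illustrative devicetree sketch (an assumption, not mandated by this file):
+ * a port with two split MSI controllers would list interrupt-names =
+ * "msi0", "msi1", which the loop above parses into msi_irq[0..1], giving a
+ * max_vectors of 2 * 32 = 64.
+ */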
+
+static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u64 *msi_vaddr;
+ int ret;
+ u32 ctrl, num_ctrls;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
+ pp->irq_mask[ctrl] = ~0;
+
+ if (!pp->msi_irq[0]) {
+ ret = dw_pcie_parse_split_msi_irq(pp);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ }
+
+ if (!pp->num_vectors)
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ if (!pp->msi_irq[0]) {
+ pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
+ if (pp->msi_irq[0] < 0) {
+ pp->msi_irq[0] = platform_get_irq(pdev, 0);
+ if (pp->msi_irq[0] < 0)
+ return pp->msi_irq[0];
+ }
+ }
+
+ dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);
+
+ pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
+ ret = dw_pcie_allocate_domains(pp);
+ if (ret)
+ return ret;
+
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ dw_chained_msi_isr, pp);
+ }
+
+ /*
+ * Even though the iMSI-RX module supports 64-bit addresses, some
+ * peripheral PCIe devices may lack 64-bit message support. In
+ * order not to miss MSI TLPs from those devices, the MSI target
+ * address has to be within the lowest 4GB.
+ *
+ * Note that until a better alternative is found, the reservation
+ * is done by allocating from the artificially limited DMA-coherent
+ * memory.
+ */
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to alloc and map MSI data\n");
+ dw_pcie_free_msi(pp);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource_entry *win;
+ struct pci_host_bridge *bridge;
+ struct resource *res;
+ int ret;
+
+ raw_spin_lock_init(&pp->lock);
+
+ ret = dw_pcie_get_resources(pci);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (res) {
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
+
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
+ } else {
+ dev_err(dev, "Missing *config* reg space\n");
+ return -ENODEV;
+ }
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ pp->bridge = bridge;
+
+ /* Get the I/O range from DT */
+ win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ if (win) {
+ pp->io_size = resource_size(win->res);
+ pp->io_bus_addr = win->res->start - win->offset;
+ pp->io_base = pci_pio_to_address(win->res->start);
+ }
+
+ /* Set default bus ops */
+ bridge->ops = &dw_pcie_ops;
+ bridge->child_ops = &dw_child_pcie_ops;
+
+ if (pp->ops->host_init) {
+ ret = pp->ops->host_init(pp);
+ if (ret)
+ return ret;
+ }
+
+ if (pci_msi_enabled()) {
+ pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
+ of_property_read_bool(np, "msi-parent") ||
+ of_property_read_bool(np, "msi-map"));
+
+ /*
+ * For the has_msi_ctrl case the default assignment is handled
+ * in dw_pcie_msi_host_init().
+ */
+ if (!pp->has_msi_ctrl && !pp->num_vectors) {
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+ } else if (pp->num_vectors > MAX_MSI_IRQS) {
+ dev_err(dev, "Invalid number of vectors\n");
+ ret = -EINVAL;
+ goto err_deinit_host;
+ }
+
+ if (pp->ops->msi_host_init) {
+ ret = pp->ops->msi_host_init(pp);
+ if (ret < 0)
+ goto err_deinit_host;
+ } else if (pp->has_msi_ctrl) {
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret < 0)
+ goto err_deinit_host;
+ }
+ }
+
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ goto err_free_msi;
+
+ ret = dw_pcie_setup_rc(pp);
+ if (ret)
+ goto err_remove_edma;
+
+ if (!dw_pcie_link_up(pci)) {
+ ret = dw_pcie_start_link(pci);
+ if (ret)
+ goto err_remove_edma;
+ }
+
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
+
+ bridge->sysdata = pp;
+
+ ret = pci_host_probe(bridge);
+ if (ret)
+ goto err_stop_link;
+
+ return 0;
+
+err_stop_link:
+ dw_pcie_stop_link(pci);
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+err_free_msi:
+ if (pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+
+err_deinit_host:
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_init);
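+
+/*
+ * Illustrative usage sketch from a platform glue driver (the ops table name
+ * my_pcie_host_ops is hypothetical; see pcie-designware-plat.c for a
+ * complete in-tree example):
+ *
+ *   pci->dev = &pdev->dev;
+ *   pci->pp.ops = &my_pcie_host_ops;
+ *   ret = dw_pcie_host_init(&pci->pp);
+ *   if (ret)
+ *       return ret;
+ */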
+
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ pci_stop_root_bus(pp->bridge->bus);
+ pci_remove_root_bus(pp->bridge->bus);
+
+ dw_pcie_stop_link(pci);
+
+ dw_pcie_edma_remove(pci);
+
+ if (pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
+
+static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int type, ret;
+ u32 busdev;
+
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (pci_is_root_bus(bus->parent))
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ type = PCIE_ATU_TYPE_CFG1;
+
+ ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
+ pp->cfg0_size);
+ if (ret)
+ return NULL;
+
+ return pp->va_cfg0_base + where;
+}
+
+static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ ret = pci_generic_config_write(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops dw_child_pcie_ops = {
+ .map_bus = dw_pcie_other_conf_map_bus,
+ .read = dw_pcie_rd_other_conf,
+ .write = dw_pcie_wr_other_conf,
+};
+
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+
+ return pci->dbi_base + where;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
+
+static struct pci_ops dw_pcie_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct resource_entry *entry;
+ int i, ret;
+
+ /* Note that the very first outbound ATU is used for CFG I/Os */
+ if (!pci->num_ob_windows) {
+ dev_err(pci->dev, "No outbound iATU found\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure all out/inbound windows are disabled before proceeding with
+ * the MEM/IO (dma-)ranges setups.
+ */
+ for (i = 0; i < pci->num_ob_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
+
+ for (i = 0; i < pci->num_ib_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);
+
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ob_windows <= ++i)
+ break;
+
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set MEM range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pp->io_size) {
+ if (pci->num_ob_windows > ++i) {
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
+ pp->io_base,
+ pp->io_bus_addr,
+ pp->io_size);
+ if (ret) {
+ dev_err(pci->dev, "Failed to set IO range %pr\n",
+ entry->res);
+ return ret;
+ }
+ } else {
+ pp->cfg0_io_shared = true;
+ }
+ }
+
+ if (pci->num_ob_windows <= i)
+ dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
+ pci->num_ob_windows);
+
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ib_windows <= i)
+ break;
+
+ ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set DMA range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pci->num_ib_windows <= i)
+ dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
+ pci->num_ib_windows);
+
+ return 0;
+}
+
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u32 val, ctrl, num_ctrls;
+ int ret;
+
+ /*
+ * Enable DBI read-only registers for writing/updating configuration.
+ * Write permission gets disabled towards the end of this function.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_setup(pci);
+
+ if (pp->has_msi_ctrl) {
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ ~0);
+ }
+ }
+
+ dw_pcie_msi_init(pp);
+
+ /* Setup RC BARs */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
+
+ /* Setup interrupt pins */
+ val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
+ val &= 0xffff00ff;
+ val |= 0x00000100;
+ dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
+
+ /* Setup bus numbers */
+ val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
+ val &= 0xff000000;
+ val |= 0x00ff0100;
+ dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
+
+ /* Setup command register */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ val &= 0xffff0000;
+ val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
+ dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+ /*
+ * If the platform provides its own child bus config accesses, it means
+ * the platform uses its own address translation component rather than
+ * ATU, so we should not program the ATU here.
+ */
+ if (pp->bridge->child_ops == &dw_child_pcie_ops) {
+ ret = dw_pcie_iatu_setup(pp);
+ if (ret)
+ return ret;
+ }
+
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+
+ /* Program correct class for RC */
+ dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
+
+int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int ret;
+
+ /*
+ * If L1SS is supported, then do not put the link into L2 as some
+ * devices such as NVMe expect low resume latency.
+ */
+ if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
+ return 0;
+
+ if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
+ return 0;
+
+ if (!pci->pp.ops->pme_turn_off)
+ return 0;
+
+ pci->pp.ops->pme_turn_off(&pci->pp);
+
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
+ PCIE_PME_TO_L2_TIMEOUT_US/10,
+ PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
+ if (ret) {
+ dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
+ return ret;
+ }
+
+ if (pci->pp.ops->host_deinit)
+ pci->pp.ops->host_deinit(&pci->pp);
+
+ pci->suspended = true;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);
+
+int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ int ret;
+
+ if (!pci->suspended)
+ return 0;
+
+ pci->suspended = false;
+
+ if (pci->pp.ops->host_init) {
+ ret = pci->pp.ops->host_init(&pci->pp);
+ if (ret) {
+ dev_err(pci->dev, "Host init failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ dw_pcie_setup_rc(&pci->pp);
+
+ ret = dw_pcie_start_link(pci);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
new file mode 100644
index 000000000..b625841e9
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe RC driver for Synopsys DesignWare Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+struct dw_plat_pcie {
+ struct dw_pcie *pci;
+ enum dw_pcie_device_mode mode;
+};
+
+struct dw_plat_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
+};
+
+static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features dw_plat_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+};
+
+static const struct pci_epc_features*
+dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &dw_plat_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = dw_plat_pcie_ep_init,
+ .raise_irq = dw_plat_pcie_ep_raise_irq,
+ .get_features = dw_plat_pcie_get_features,
+};
+
+static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = dw_plat_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ pp->num_vectors = MAX_MSI_IRQS;
+ pp->ops = &dw_plat_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "Failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_plat_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_plat_pcie *dw_plat_pcie;
+ struct dw_pcie *pci;
+ int ret;
+ const struct dw_plat_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
+ if (!dw_plat_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+
+ dw_plat_pcie->pci = pci;
+ dw_plat_pcie->mode = mode;
+
+ platform_set_drvdata(pdev, dw_plat_pcie);
+
+ switch (dw_plat_pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
+ return -ENODEV;
+
+ ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
+ return -ENODEV;
+
+ pci->ep.ops = &pcie_ep_ops;
+ ret = dw_pcie_ep_init(&pci->ep);
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id dw_plat_pcie_of_match[] = {
+ {
+ .compatible = "snps,dw-pcie",
+ .data = &dw_plat_pcie_rc_of_data,
+ },
+ {
+ .compatible = "snps,dw-pcie-ep",
+ .data = &dw_plat_pcie_ep_of_data,
+ },
+ {},
+};
+
+static struct platform_driver dw_plat_pcie_driver = {
+ .driver = {
+ .name = "dw-pcie",
+ .of_match_table = dw_plat_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = dw_plat_pcie_probe,
+};
+builtin_platform_driver(dw_plat_pcie_driver);
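+
+/*
+ * Illustrative devicetree node sketch for the generic RC binding above
+ * (addresses and sizes are made-up assumptions; only the compatible and
+ * the "dbi"/"config" reg-names are taken from this driver stack):
+ *
+ *   pcie@40000000 {
+ *       compatible = "snps,dw-pcie";
+ *       reg = <0x40000000 0x1000>, <0x40100000 0x1000>;
+ *       reg-names = "dbi", "config";
+ *   };
+ */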
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
new file mode 100644
index 000000000..2b60d20df
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -0,0 +1,1064 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/align.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma/edma.h>
+#include <linux/gpio/consumer.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = {
+ [DW_PCIE_DBI_CLK] = "dbi",
+ [DW_PCIE_MSTR_CLK] = "mstr",
+ [DW_PCIE_SLV_CLK] = "slv",
+};
+
+static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = {
+ [DW_PCIE_PIPE_CLK] = "pipe",
+ [DW_PCIE_CORE_CLK] = "core",
+ [DW_PCIE_AUX_CLK] = "aux",
+ [DW_PCIE_REF_CLK] = "ref",
+};
+
+static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = {
+ [DW_PCIE_DBI_RST] = "dbi",
+ [DW_PCIE_MSTR_RST] = "mstr",
+ [DW_PCIE_SLV_RST] = "slv",
+};
+
+static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
+ [DW_PCIE_NON_STICKY_RST] = "non-sticky",
+ [DW_PCIE_STICKY_RST] = "sticky",
+ [DW_PCIE_CORE_RST] = "core",
+ [DW_PCIE_PIPE_RST] = "pipe",
+ [DW_PCIE_PHY_RST] = "phy",
+ [DW_PCIE_HOT_RST] = "hot",
+ [DW_PCIE_PWR_RST] = "pwr",
+};
+
+static int dw_pcie_get_clocks(struct dw_pcie *pci)
+{
+ int i, ret;
+
+ for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++)
+ pci->app_clks[i].id = dw_pcie_app_clks[i];
+
+ for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++)
+ pci->core_clks[i].id = dw_pcie_core_clks[i];
+
+ ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS,
+ pci->app_clks);
+ if (ret)
+ return ret;
+
+ return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS,
+ pci->core_clks);
+}
+
+static int dw_pcie_get_resets(struct dw_pcie *pci)
+{
+ int i, ret;
+
+ for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++)
+ pci->app_rsts[i].id = dw_pcie_app_rsts[i];
+
+ for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++)
+ pci->core_rsts[i].id = dw_pcie_core_rsts[i];
+
+ ret = devm_reset_control_bulk_get_optional_shared(pci->dev,
+ DW_PCIE_NUM_APP_RSTS,
+ pci->app_rsts);
+ if (ret)
+ return ret;
+
+ ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev,
+ DW_PCIE_NUM_CORE_RSTS,
+ pci->core_rsts);
+ if (ret)
+ return ret;
+
+ pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pci->pe_rst))
+ return PTR_ERR(pci->pe_rst);
+
+ return 0;
+}
+
+int dw_pcie_get_resources(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ struct device_node *np = dev_of_node(pci->dev);
+ struct resource *res;
+ int ret;
+
+ if (!pci->dbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ }
+
+ /* DBI2 is mainly useful for the endpoint controller */
+ if (!pci->dbi_base2) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ if (res) {
+ pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+ } else {
+ pci->dbi_base2 = pci->dbi_base + SZ_4K;
+ }
+ }
+
+ /* For non-unrolled iATU/eDMA platforms this range will be ignored */
+ if (!pci->atu_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ if (res) {
+ pci->atu_size = resource_size(res);
+ pci->atu_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->atu_base))
+ return PTR_ERR(pci->atu_base);
+ } else {
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+ }
+
+ /* Set a default value suitable for at most 8 in and 8 out windows */
+ if (!pci->atu_size)
+ pci->atu_size = SZ_4K;
+
+ /* eDMA region can be mapped to a custom base address */
+ if (!pci->edma.reg_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
+ if (res) {
+ pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->edma.reg_base))
+ return PTR_ERR(pci->edma.reg_base);
+ } else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
+ pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
+ }
+ }
+
+ /* The LLDD is expected to manage the clock and reset states manually */
+ if (dw_pcie_cap_is(pci, REQ_RES)) {
+ ret = dw_pcie_get_clocks(pci);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_get_resets(pci);
+ if (ret)
+ return ret;
+ }
+
+ if (pci->link_gen < 1)
+ pci->link_gen = of_pci_get_max_link_speed(np);
+
+ of_property_read_u32(np, "num-lanes", &pci->num_lanes);
+
+ if (of_property_read_bool(np, "snps,enable-cdm-check"))
+ dw_pcie_cap_set(pci, CDM_CHECK);
+
+ return 0;
+}
+
+void dw_pcie_version_detect(struct dw_pcie *pci)
+{
+ u32 ver;
+
+ /* The content of the CSR is zero on DWC PCIe older than v4.70a */
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
+ if (!ver)
+ return;
+
+ if (pci->version && pci->version != ver)
+ dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
+ pci->version, ver);
+ else
+ pci->version = ver;
+
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
+
+ if (pci->type && pci->type != ver)
+ dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
+ pci->type, ver);
+ else
+ pci->type = ver;
+}
+
+/*
+ * These interfaces resemble the pci_find_*capability() interfaces, but these
+ * are for configuring host controllers, which are bridges *to* PCI devices but
+ * are not PCI devices themselves.
+ */
+static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
+ u8 cap)
+{
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
+ if (!cap_ptr)
+ return 0;
+
+ reg = dw_pcie_readw_dbi(pci, cap_ptr);
+ cap_id = (reg & 0x00ff);
+
+ if (cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
+ next_cap_ptr = (reg & 0xff00) >> 8;
+ return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+}
+
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
+{
+ u8 next_cap_ptr;
+ u16 reg;
+
+ reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+ return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
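+
+/*
+ * Illustrative usage: locate the PCIe capability so that the Link registers
+ * can be accessed through the DBI, as done elsewhere in this file:
+ *
+ *   u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ *   u16 lnksta = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+ */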
+
+static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
+ u8 cap)
+{
+ u32 header;
+ int ttl;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ /* minimum 8 bytes per capability */
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+ if (start)
+ pos = start;
+
+ header = dw_pcie_readl_dbi(pci, pos);
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap && pos != start)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < PCI_CFG_SPACE_SIZE)
+ break;
+
+ header = dw_pcie_readl_dbi(pci, pos);
+ }
+
+ return 0;
+}
+
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
+{
+ return dw_pcie_find_next_ext_capability(pci, 0, cap);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+
+int dw_pcie_read(void __iomem *addr, int size, u32 *val)
+{
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 4) {
+ *val = readl(addr);
+ } else if (size == 2) {
+ *val = readw(addr);
+ } else if (size == 1) {
+ *val = readb(addr);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_read);
+
+int dw_pcie_write(void __iomem *addr, int size, u32 val)
+{
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (size == 4)
+ writel(val, addr);
+ else if (size == 2)
+ writew(val, addr);
+ else if (size == 1)
+ writeb(val, addr);
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_write);
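+
+/*
+ * Illustrative: both helpers reject accesses that are not naturally
+ * aligned to their size, e.g.:
+ *
+ *   dw_pcie_read(base + 0x2, 2, &val);  -> PCIBIOS_SUCCESSFUL
+ *   dw_pcie_read(base + 0x1, 2, &val);  -> PCIBIOS_BAD_REGISTER_NUMBER
+ */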
+
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
+{
+ int ret;
+ u32 val;
+
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
+
+ ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
+ if (ret)
+ dev_err(pci->dev, "Read DBI address failed\n");
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
+
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+ int ret;
+
+ if (pci->ops && pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(pci->dbi_base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI address failed\n");
+}
+EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
+
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+ int ret;
+
+ if (pci->ops && pci->ops->write_dbi2) {
+ pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI address failed\n");
+}
+
+static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
+ u32 index)
+{
+ if (dw_pcie_cap_is(pci, IATU_UNROLL))
+ return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
+ return pci->atu_base;
+}
+
+static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
+{
+ void __iomem *base;
+ int ret;
+ u32 val;
+
+ base = dw_pcie_select_atu(pci, dir, index);
+
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, base, reg, 4);
+
+ ret = dw_pcie_read(base + reg, 4, &val);
+ if (ret)
+ dev_err(pci->dev, "Read ATU address failed\n");
+
+ return val;
+}
+
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
+ u32 reg, u32 val)
+{
+ void __iomem *base;
+ int ret;
+
+ base = dw_pcie_select_atu(pci, dir, index);
+
+ if (pci->ops && pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, base, reg, 4, val);
+ return;
+ }
+
+ ret = dw_pcie_write(base + reg, 4, val);
+ if (ret)
+ dev_err(pci->dev, "Write ATU address failed\n");
+}
+
+static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
+{
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
+}
+
+static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
+{
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
+}
+
+static inline u32 dw_pcie_enable_ecrc(u32 val)
+{
+ /*
+ * DesignWare core version 4.90A has a design issue where the 'TD'
+ * bit in the Control register-1 of the ATU outbound region acts
+ * like an override for the ECRC setting, i.e., the presence of TLP
+ * Digest (ECRC) in the outgoing TLPs is solely determined by this
+ * bit. This is contrary to the PCIe spec which says that the
+ * enablement of the ECRC is solely determined by the AER
+ * registers.
+ *
+ * Because of this, even when the ECRC is enabled through AER
+ * registers, the transactions going through ATU won't have TLP
+ * Digest as there is no way the PCI core AER code could program
+ * the TD bit which is specific to the DesignWare core.
+ *
+ * The best way to handle this scenario is to program the TD bit
+ * always. It affects only the traffic from root port to downstream
+ * devices.
+ *
+ * At this point,
+ * When ECRC is enabled in AER registers, everything works normally
+ * When ECRC is NOT enabled in AER registers, then,
+ * on Root Port:- TLP Digest (DWord size) gets appended to each packet
+ * even though it is not required. Since downstream
+ * TLPs are mostly for configuration accesses and BAR
+ * accesses, they are not in critical path and won't
+ * have much negative effect on the performance.
+ * on End Point:- TLP Digest is received for some/all the packets coming
+ * from the root port. TLP Digest is ignored because,
+ * as per the PCIe Spec r5.0 v1.0 section 2.2.3
+ * "TLP Digest Rules", when an endpoint receives TLP
+ * Digest when its ECRC check functionality is disabled
+ * in AER registers, received TLP Digest is just ignored.
+ * Since there is no issue or error reported either side, best way to
+ * handle the scenario is to program TD bit by default.
+ */
+
+ return val | PCIE_ATU_TD;
+}
+
+static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ u32 retries, val;
+ u64 limit_addr;
+
+ if (pci->ops && pci->ops->cpu_addr_fixup)
+ cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+
+ limit_addr = cpu_addr + size - 1;
+
+ if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ return -EINVAL;
+ }
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(cpu_addr));
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(pci_addr));
+
+ val = type | PCIE_ATU_FUNC_NUM(func_no);
+ if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ if (dw_pcie_ver_is(pci, 490A))
+ val = dw_pcie_enable_ecrc(val);
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ mdelay(LINK_WAIT_IATU);
+ }
+
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
+
+ return -ETIMEDOUT;
+}
+
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
+{
+ return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
+ cpu_addr, pci_addr, size);
+}
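+
+/*
+ * Illustrative usage sketch (the index and addresses are made-up
+ * assumptions): map a 64K MEM window at CPU address 0x60000000 one-to-one
+ * onto the PCI bus:
+ *
+ *   ret = dw_pcie_prog_outbound_atu(pci, 1, PCIE_ATU_TYPE_MEM,
+ *                                   0x60000000, 0x60000000, SZ_64K);
+ */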
+
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u64 size)
+{
+ return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
+}
+
+static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
+{
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
+}
+
+static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
+{
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
+}
+
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
+{
+ u64 limit_addr = pci_addr + size - 1;
+ u32 retries, val;
+
+ if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ return -EINVAL;
+ }
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(pci_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ val = type;
+ if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val);
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ mdelay(LINK_WAIT_IATU);
+ }
+
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+ return -ETIMEDOUT;
+}
+
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u8 bar)
+{
+ u32 retries, val;
+
+ if (!IS_ALIGNED(cpu_addr, pci->region_align))
+ return -EINVAL;
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
+ PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ mdelay(LINK_WAIT_IATU);
+ }
+
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+ return -ETIMEDOUT;
+}
+
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
+{
+ dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
+}
+
+int dw_pcie_wait_for_link(struct dw_pcie *pci)
+{
+ u32 offset, val;
+ int retries;
+
+ /* Check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (dw_pcie_link_up(pci))
+ break;
+
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ if (retries >= LINK_WAIT_MAX_RETRIES) {
+ dev_info(pci->dev, "Phy link never came up\n");
+ return -ETIMEDOUT;
+ }
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+ dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
+ FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
+
+int dw_pcie_link_up(struct dw_pcie *pci)
+{
+ u32 val;
+
+ if (pci->ops && pci->ops->link_up)
+ return pci->ops->link_up(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
+ return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
+ (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
+}
+EXPORT_SYMBOL_GPL(dw_pcie_link_up);
+
+void dw_pcie_upconfig_setup(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
+ val |= PORT_MLTI_UPCFG_SUPPORT;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
+
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+{
+ u32 cap, ctrl2, link_speed;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
+
+ switch (pcie_link_speed[link_gen]) {
+ case PCIE_SPEED_2_5GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
+ break;
+ case PCIE_SPEED_5_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
+ break;
+ case PCIE_SPEED_8_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
+ break;
+ case PCIE_SPEED_16_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
+ break;
+ default:
+ /* Use hardware capability */
+ link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
+ break;
+ }
+
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);
+
+ cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
+}
+
+static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
+{
+ u32 lnkcap, lwsc, plc;
+ u8 cap;
+
+ if (!num_lanes)
+ return;
+
+ /* Set the number of lanes */
+ plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ plc &= ~PORT_LINK_FAST_LINK_MODE;
+ plc &= ~PORT_LINK_MODE_MASK;
+
+ /* Set link width speed control register */
+ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ switch (num_lanes) {
+ case 1:
+ plc |= PORT_LINK_MODE_1_LANES;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ break;
+ case 2:
+ plc |= PORT_LINK_MODE_2_LANES;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+ break;
+ case 4:
+ plc |= PORT_LINK_MODE_4_LANES;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+ break;
+ case 8:
+ plc |= PORT_LINK_MODE_8_LANES;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+ break;
+ default:
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
+ return;
+ }
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);
+
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+}
+
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
+{
+ int max_region, ob, ib;
+ u32 val, min, dir;
+ u64 max;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
+ if (val == 0xFFFFFFFF) {
+ dw_pcie_cap_set(pci, IATU_UNROLL);
+
+ max_region = min((int)pci->atu_size / 512, 256);
+ } else {
+ pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+ pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+ max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+ }
+
+ for (ob = 0; ob < max_region; ob++) {
+ dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
+ break;
+ }
+
+ for (ib = 0; ib < max_region; ib++) {
+ dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
+ break;
+ }
+
+ if (ob) {
+ dir = PCIE_ATU_REGION_DIR_OB;
+ } else if (ib) {
+ dir = PCIE_ATU_REGION_DIR_IB;
+ } else {
+ dev_err(pci->dev, "No iATU regions found\n");
+ return;
+ }
+
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
+ min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);
+
+ if (dw_pcie_ver_is_ge(pci, 460A)) {
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
+ max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
+ } else {
+ max = 0;
+ }
+
+ pci->num_ob_windows = ob;
+ pci->num_ib_windows = ib;
+ pci->region_align = 1 << fls(min);
+ pci->region_limit = (max << 32) | (SZ_4G - 1);
+
+ dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n",
+ dw_pcie_cap_is(pci, IATU_UNROLL) ? "T" : "F",
+ pci->num_ob_windows, pci->num_ib_windows,
+ pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
+}
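+
+/*
+ * Illustrative worked example for the probing above: if writing 0 to the
+ * first window's PCIE_ATU_LIMIT reads back as 0xFFFF (the low 16 bits being
+ * hardwired), then fls(0xFFFF) = 16 and region_align = 1 << 16 = 64K.
+ */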
+
+static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg)
+{
+ u32 val = 0;
+ int ret;
+
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4);
+
+ ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val);
+ if (ret)
+ dev_err(pci->dev, "Read DMA address failed\n");
+
+ return val;
+}
+
+static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ char name[6];
+ int ret;
+
+ if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0)
+ return ret;
+
+ snprintf(name, sizeof(name), "dma%u", nr);
+
+ return platform_get_irq_byname_optional(pdev, name);
+}
+
+static struct dw_edma_plat_ops dw_pcie_edma_ops = {
+ .irq_vector = dw_pcie_edma_irq_vector,
+};
+
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+ u32 val;
+
+ /*
+ * Indirect eDMA CSR access was removed entirely in v5.40a, so no
+ * space is reserved for the eDMA channels viewport anymore and the
+ * former DMA CTRL register is no longer fixed to all-Fs.
+ */
+ if (dw_pcie_ver_is_ge(pci, 540A))
+ val = 0xFFFFFFFF;
+ else
+ val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
+
+ if (val == 0xFFFFFFFF && pci->edma.reg_base) {
+ pci->edma.mf = EDMA_MF_EDMA_UNROLL;
+
+ val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
+ } else if (val != 0xFFFFFFFF) {
+ pci->edma.mf = EDMA_MF_EDMA_LEGACY;
+
+ pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE;
+ } else {
+ return -ENODEV;
+ }
+
+ pci->edma.dev = pci->dev;
+
+ if (!pci->edma.ops)
+ pci->edma.ops = &dw_pcie_edma_ops;
+
+ pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+
+ pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+
+ /* Sanity check the channels count if the mapping was incorrect */
+ if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
+ !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
+ char name[6];
+ int ret;
+
+ if (pci->edma.nr_irqs == 1)
+ return 0;
+ else if (pci->edma.nr_irqs > 1)
+ return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0) {
+ pci->edma.nr_irqs = 1;
+ return 0;
+ }
+
+ for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) {
+ snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs);
+
+ ret = platform_get_irq_byname_optional(pdev, name);
+ if (ret <= 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci)
+{
+ struct dw_edma_region *ll;
+ dma_addr_t paddr;
+ int i;
+
+ for (i = 0; i < pci->edma.ll_wr_cnt; i++) {
+ ll = &pci->edma.ll_region_wr[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ for (i = 0; i < pci->edma.ll_rd_cnt; i++) {
+ ll = &pci->edma.ll_region_rd[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ return 0;
+}
+
+int dw_pcie_edma_detect(struct dw_pcie *pci)
+{
+ int ret;
+
+ /* Don't fail if no eDMA was found (for the backward compatibility) */
+ ret = dw_pcie_edma_find_chip(pci);
+ if (ret)
+ return 0;
+
+ /* Don't fail on the IRQs verification (for the backward compatibility) */
+ ret = dw_pcie_edma_irq_verify(pci);
+ if (ret) {
+ dev_err(pci->dev, "Invalid eDMA IRQs found\n");
+ return 0;
+ }
+
+ ret = dw_pcie_edma_ll_alloc(pci);
+ if (ret) {
+ dev_err(pci->dev, "Couldn't allocate LLP memory\n");
+ return ret;
+ }
+
+ /* Don't fail if the DW eDMA driver can't find the device */
+ ret = dw_edma_probe(&pci->edma);
+ if (ret && ret != -ENODEV) {
+ dev_err(pci->dev, "Couldn't register eDMA device\n");
+ return ret;
+ }
+
+ dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n",
+ pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F",
+ pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt);
+
+ return 0;
+}
+
+void dw_pcie_edma_remove(struct dw_pcie *pci)
+{
+ dw_edma_remove(&pci->edma);
+}
+
+void dw_pcie_setup(struct dw_pcie *pci)
+{
+ u32 val;
+
+ if (pci->link_gen > 0)
+ dw_pcie_link_set_max_speed(pci, pci->link_gen);
+
+ /* Configure Gen1 N_FTS */
+ if (pci->n_fts[0]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
+ val |= PORT_AFR_N_FTS(pci->n_fts[0]);
+ val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
+ }
+
+ /* Configure Gen2+ N_FTS */
+ if (pci->n_fts[1]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_N_FTS_MASK;
+ val |= pci->n_fts[1];
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ }
+
+ if (dw_pcie_cap_is(pci, CDM_CHECK)) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+ PCIE_PL_CHK_REG_CHK_REG_START;
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+ dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
+}
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
new file mode 100644
index 000000000..ef0b2efa9
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -0,0 +1,645 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#ifndef _PCIE_DESIGNWARE_H
+#define _PCIE_DESIGNWARE_H
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/edma.h>
+#include <linux/gpio/consumer.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/reset.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+/* DWC PCIe IP-core versions (native support since v4.70a) */
+#define DW_PCIE_VER_365A 0x3336352a
+#define DW_PCIE_VER_460A 0x3436302a
+#define DW_PCIE_VER_470A 0x3437302a
+#define DW_PCIE_VER_480A 0x3438302a
+#define DW_PCIE_VER_490A 0x3439302a
+#define DW_PCIE_VER_520A 0x3532302a
+#define DW_PCIE_VER_540A 0x3534302a
+
+#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
+ ((_pci)->version _op DW_PCIE_VER_ ## _ver)
+
+#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==)
+
+#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=)
+
+#define dw_pcie_ver_type_is(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==))
+
+#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=))
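+
+/*
+ * Illustrative: the comparisons above operate on the raw 32-bit version
+ * codes, so e.g. dw_pcie_ver_is_ge(pci, 460A) holds for a v4.70a core
+ * since 0x3437302a >= 0x3436302a.
+ */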
+
+/* DWC PCIe controller capabilities */
+#define DW_PCIE_CAP_REQ_RES 0
+#define DW_PCIE_CAP_IATU_UNROLL 1
+#define DW_PCIE_CAP_CDM_CHECK 2
+
+#define dw_pcie_cap_is(_pci, _cap) \
+ test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
+
+#define dw_pcie_cap_set(_pci, _cap) \
+ set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+/* Parameters for the waiting for iATU enabled routine */
+#define LINK_WAIT_MAX_IATU_RETRIES 5
+#define LINK_WAIT_IATU 9
+
+/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_AFR 0x70C
+#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
+#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
+#define PORT_AFR_CC_N_FTS_MASK GENMASK(23, 16)
+#define PORT_AFR_CC_N_FTS(n) FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, n)
+#define PORT_AFR_ENTER_ASPM BIT(30)
+#define PORT_AFR_L0S_ENTRANCE_LAT_SHIFT 24
+#define PORT_AFR_L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
+#define PORT_AFR_L1_ENTRANCE_LAT_SHIFT 27
+#define PORT_AFR_L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
+
+#define PCIE_PORT_LINK_CONTROL 0x710
+#define PORT_LINK_DLL_LINK_EN BIT(5)
+#define PORT_LINK_FAST_LINK_MODE BIT(7)
+#define PORT_LINK_MODE_MASK GENMASK(21, 16)
+#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n)
+#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
+#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
+#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
+#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
+
+#define PCIE_PORT_DEBUG0 0x728
+#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
+#define PORT_LOGIC_LTSSM_STATE_L0 0x11
+#define PCIE_PORT_DEBUG1 0x72C
+#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
+#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29)
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+#define PORT_LOGIC_N_FTS_MASK GENMASK(7, 0)
+#define PORT_LOGIC_SPEED_CHANGE BIT(17)
+#define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8)
+#define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES PORT_LOGIC_LINK_WIDTH(0x1)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4)
+#define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8)
+
+#define PCIE_MSI_ADDR_LO 0x820
+#define PCIE_MSI_ADDR_HI 0x824
+#define PCIE_MSI_INTR0_ENABLE 0x828
+#define PCIE_MSI_INTR0_MASK 0x82C
+#define PCIE_MSI_INTR0_STATUS 0x830
+
+#define GEN3_RELATED_OFF 0x890
+#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
+#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13)
+#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+
+#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
+#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
+
+#define PCIE_VERSION_NUMBER 0x8F8
+#define PCIE_VERSION_TYPE 0x8FC
+
+/*
+ * iATU inbound and outbound windows CSRs. Before IP-core v4.80a the
+ * iATU region CSRs were only indirectly accessible by means of a
+ * dedicated viewport selector. The iATU/eDMA CSR space was redesigned
+ * in DWC PCIe v4.80a so that the viewport is unrolled into a directly
+ * accessible iATU/eDMA CSR space.
+ */
+#define PCIE_ATU_VIEWPORT 0x900
+#define PCIE_ATU_REGION_DIR_IB BIT(31)
+#define PCIE_ATU_REGION_DIR_OB 0
+#define PCIE_ATU_VIEWPORT_BASE 0x904
+#define PCIE_ATU_UNROLL_BASE(dir, index) \
+ (((index) << 9) | ((dir == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))
+#define PCIE_ATU_VIEWPORT_SIZE 0x2C
+#define PCIE_ATU_REGION_CTRL1 0x000
+#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
+#define PCIE_ATU_TYPE_MEM 0x0
+#define PCIE_ATU_TYPE_IO 0x2
+#define PCIE_ATU_TYPE_CFG0 0x4
+#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_TD BIT(8)
+#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
+#define PCIE_ATU_REGION_CTRL2 0x004
+#define PCIE_ATU_ENABLE BIT(31)
+#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
+#define PCIE_ATU_LOWER_BASE 0x008
+#define PCIE_ATU_UPPER_BASE 0x00C
+#define PCIE_ATU_LIMIT 0x010
+#define PCIE_ATU_LOWER_TARGET 0x014
+#define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x)
+#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
+#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
+#define PCIE_ATU_UPPER_TARGET 0x018
+#define PCIE_ATU_UPPER_LIMIT 0x020
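+
+/*
+ * Illustrative: a CFG TLP routed to bus 1, device 0, function 0 is encoded
+ * into PCIE_ATU_LOWER_TARGET as
+ *
+ *   PCIE_ATU_BUS(1) | PCIE_ATU_DEV(0) | PCIE_ATU_FUNC(0) == 0x01000000
+ */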
+
+#define PCIE_MISC_CONTROL_1_OFF 0x8BC
+#define PCIE_DBI_RO_WR_EN BIT(0)
+
+#define PCIE_MSIX_DOORBELL 0x948
+#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
+
+/*
+ * eDMA CSRs. In DW PCIe IP-core v4.70a and older the eDMA registers were
+ * accessible through the Port Logic register space. Afterwards the unrolled
+ * mapping was introduced so that eDMA and iATU could be accessed via a
+ * dedicated register space.
+ */
+#define PCIE_DMA_VIEWPORT_BASE 0x970
+#define PCIE_DMA_UNROLL_BASE 0x80000
+#define PCIE_DMA_CTRL 0x008
+#define PCIE_DMA_NUM_WR_CHAN GENMASK(3, 0)
+#define PCIE_DMA_NUM_RD_CHAN GENMASK(19, 16)
+
+#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
+#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
+#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR BIT(16)
+#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR BIT(17)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE BIT(18)
+
+#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
+
+/*
+ * iATU Unroll-specific register definitions
+ * Since core version 4.80a, address translation is done through the
+ * unrolled CSR space
+ */
+#define PCIE_ATU_UNR_REGION_CTRL1 0x00
+#define PCIE_ATU_UNR_REGION_CTRL2 0x04
+#define PCIE_ATU_UNR_LOWER_BASE 0x08
+#define PCIE_ATU_UNR_UPPER_BASE 0x0C
+#define PCIE_ATU_UNR_LOWER_LIMIT 0x10
+#define PCIE_ATU_UNR_LOWER_TARGET 0x14
+#define PCIE_ATU_UNR_UPPER_TARGET 0x18
+#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
+
+/*
+ * RAS-DES register definitions
+ */
+#define PCIE_RAS_DES_EVENT_COUNTER_CONTROL 0x8
+#define EVENT_COUNTER_ALL_CLEAR 0x3
+#define EVENT_COUNTER_ENABLE_ALL 0x7
+#define EVENT_COUNTER_ENABLE_SHIFT 2
+#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
+#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
+#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
+#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
+#define EVENT_COUNTER_EVENT_L1 0x5
+#define EVENT_COUNTER_EVENT_L1_1 0x7
+#define EVENT_COUNTER_EVENT_L1_2 0x8
+#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
+#define EVENT_COUNTER_GROUP_5 0x5
+
+#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
+
+/*
+ * The default address offset between dbi_base and atu_base. Root controller
+ * drivers are not required to initialize atu_base if the offset matches this
+ * default; the driver core automatically derives atu_base from dbi_base using
+ * this offset when atu_base is left unset.
+ */
+#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
+#define DEFAULT_DBI_DMA_OFFSET PCIE_DMA_UNROLL_BASE
+
+#define MAX_MSI_IRQS 256
+#define MAX_MSI_IRQS_PER_CTRL 32
+#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
+#define MSI_REG_CTRL_BLOCK_SIZE 12
+#define MSI_DEF_NUM_VECTORS 32
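+
+/*
+ * Illustrative: with MAX_MSI_IRQS = 256 and 32 vectors per controller there
+ * are up to 8 controllers; each one owns a 12-byte ENABLE/MASK/STATUS block
+ * (three 32-bit registers) starting at PCIE_MSI_INTR0_ENABLE.
+ */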
+
+/* Maximum number of inbound/outbound iATUs */
+#define MAX_IATU_IN 256
+#define MAX_IATU_OUT 256
+
+/* Default eDMA LLP memory size */
+#define DMA_LLP_MEM_SIZE PAGE_SIZE
+
+struct dw_pcie;
+struct dw_pcie_rp;
+struct dw_pcie_ep;
+
+enum dw_pcie_device_mode {
+ DW_PCIE_UNKNOWN_TYPE,
+ DW_PCIE_EP_TYPE,
+ DW_PCIE_LEG_EP_TYPE,
+ DW_PCIE_RC_TYPE,
+};
+
+enum dw_pcie_app_clk {
+ DW_PCIE_DBI_CLK,
+ DW_PCIE_MSTR_CLK,
+ DW_PCIE_SLV_CLK,
+ DW_PCIE_NUM_APP_CLKS
+};
+
+enum dw_pcie_core_clk {
+ DW_PCIE_PIPE_CLK,
+ DW_PCIE_CORE_CLK,
+ DW_PCIE_AUX_CLK,
+ DW_PCIE_REF_CLK,
+ DW_PCIE_NUM_CORE_CLKS
+};
+
+enum dw_pcie_app_rst {
+ DW_PCIE_DBI_RST,
+ DW_PCIE_MSTR_RST,
+ DW_PCIE_SLV_RST,
+ DW_PCIE_NUM_APP_RSTS
+};
+
+enum dw_pcie_core_rst {
+ DW_PCIE_NON_STICKY_RST,
+ DW_PCIE_STICKY_RST,
+ DW_PCIE_CORE_RST,
+ DW_PCIE_PIPE_RST,
+ DW_PCIE_PHY_RST,
+ DW_PCIE_HOT_RST,
+ DW_PCIE_PWR_RST,
+ DW_PCIE_NUM_CORE_RSTS
+};
+
+enum dw_pcie_ltssm {
+	/* Must align with PCIE_PORT_DEBUG0 bits [5:0] */
+ DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
+ DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_L0 = 0x11,
+ DW_PCIE_LTSSM_L2_IDLE = 0x15,
+
+ DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
+};
+
+struct dw_pcie_host_ops {
+ int (*host_init)(struct dw_pcie_rp *pp);
+ void (*host_deinit)(struct dw_pcie_rp *pp);
+ int (*msi_host_init)(struct dw_pcie_rp *pp);
+ void (*pme_turn_off)(struct dw_pcie_rp *pp);
+};
+
+struct dw_pcie_rp {
+ bool has_msi_ctrl:1;
+ bool cfg0_io_shared:1;
+ u64 cfg0_base;
+ void __iomem *va_cfg0_base;
+ u32 cfg0_size;
+ resource_size_t io_base;
+ phys_addr_t io_bus_addr;
+ u32 io_size;
+ int irq;
+ const struct dw_pcie_host_ops *ops;
+ int msi_irq[MAX_MSI_CTRLS];
+ struct irq_domain *irq_domain;
+ struct irq_domain *msi_domain;
+ dma_addr_t msi_data;
+ struct irq_chip *msi_irq_chip;
+ u32 num_vectors;
+ u32 irq_mask[MAX_MSI_CTRLS];
+ struct pci_host_bridge *bridge;
+ raw_spinlock_t lock;
+ DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+};
+
+struct dw_pcie_ep_ops {
+ void (*ep_init)(struct dw_pcie_ep *ep);
+ int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num);
+ const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
+	/*
+	 * Provide a method to implement function-specific config space
+	 * access when each function's registers live at a different
+	 * offset: return the offset for the given function. Platforms
+	 * that instead select the function by writing a register should
+	 * return 0 here and perform that write in the platform driver's
+	 * callbacks.
+	 */
+ unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no);
+};
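+
+/*
+ * Hedged example of a func_conf_select implementation for a hypothetical
+ * platform whose per-function config spaces sit at fixed 4 KiB strides
+ * (the callback name and the stride are illustrative, not from any real
+ * driver):
+ *
+ *	static unsigned int foo_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
+ *							 u8 func_no)
+ *	{
+ *		return func_no * SZ_4K;
+ *	}
+ */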
+
+struct dw_pcie_ep_func {
+ struct list_head list;
+ u8 func_no;
+ u8 msi_cap; /* MSI capability offset */
+ u8 msix_cap; /* MSI-X capability offset */
+};
+
+struct dw_pcie_ep {
+ struct pci_epc *epc;
+ struct list_head func_list;
+ const struct dw_pcie_ep_ops *ops;
+ phys_addr_t phys_base;
+ size_t addr_size;
+ size_t page_size;
+ u8 bar_to_atu[PCI_STD_NUM_BARS];
+ phys_addr_t *outbound_addr;
+ unsigned long *ib_window_map;
+ unsigned long *ob_window_map;
+ void __iomem *msi_mem;
+ phys_addr_t msi_mem_phys;
+ struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
+};
+
+struct dw_pcie_ops {
+ u64 (*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+ u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size);
+ void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size, u32 val);
+ void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size, u32 val);
+ int (*link_up)(struct dw_pcie *pcie);
+ enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie);
+ int (*start_link)(struct dw_pcie *pcie);
+ void (*stop_link)(struct dw_pcie *pcie);
+};
+
+struct dw_pcie {
+ struct device *dev;
+ void __iomem *dbi_base;
+ void __iomem *dbi_base2;
+ void __iomem *atu_base;
+ size_t atu_size;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
+ u32 region_align;
+ u64 region_limit;
+ struct dw_pcie_rp pp;
+ struct dw_pcie_ep ep;
+ const struct dw_pcie_ops *ops;
+ u32 version;
+ u32 type;
+ unsigned long caps;
+ int num_lanes;
+ int link_gen;
+ u8 n_fts[2];
+ struct dw_edma_chip edma;
+ struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
+ struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
+ struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
+ struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
+ struct gpio_desc *pe_rst;
+ bool suspended;
+};
+
+#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
+
+#define to_dw_pcie_from_ep(endpoint) \
+ container_of((endpoint), struct dw_pcie, ep)
+
+int dw_pcie_get_resources(struct dw_pcie *pci);
+
+void dw_pcie_version_detect(struct dw_pcie *pci);
+
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+
+int dw_pcie_read(void __iomem *addr, int size, u32 *val);
+int dw_pcie_write(void __iomem *addr, int size, u32 val);
+
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+int dw_pcie_link_up(struct dw_pcie *pci);
+void dw_pcie_upconfig_setup(struct dw_pcie *pci);
+int dw_pcie_wait_for_link(struct dw_pcie *pci);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u8 bar);
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
+void dw_pcie_setup(struct dw_pcie *pci);
+void dw_pcie_iatu_detect(struct dw_pcie *pci);
+int dw_pcie_edma_detect(struct dw_pcie *pci);
+void dw_pcie_edma_remove(struct dw_pcie *pci);
+
+int dw_pcie_suspend_noirq(struct dw_pcie *pci);
+int dw_pcie_resume_noirq(struct dw_pcie *pci);
+
+static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+{
+ dw_pcie_write_dbi(pci, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return dw_pcie_read_dbi(pci, reg, 0x4);
+}
+
+static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
+{
+ dw_pcie_write_dbi(pci, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return dw_pcie_read_dbi(pci, reg, 0x2);
+}
+
+static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
+{
+ dw_pcie_write_dbi(pci, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return dw_pcie_read_dbi(pci, reg, 0x1);
+}
+
+static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
+{
+ dw_pcie_write_dbi2(pci, reg, 0x4, val);
+}
+
+static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
+{
+ u32 reg;
+ u32 val;
+
+ reg = PCIE_MISC_CONTROL_1_OFF;
+ val = dw_pcie_readl_dbi(pci, reg);
+ val |= PCIE_DBI_RO_WR_EN;
+ dw_pcie_writel_dbi(pci, reg, val);
+}
+
+static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
+{
+ u32 reg;
+ u32 val;
+
+ reg = PCIE_MISC_CONTROL_1_OFF;
+ val = dw_pcie_readl_dbi(pci, reg);
+ val &= ~PCIE_DBI_RO_WR_EN;
+ dw_pcie_writel_dbi(pci, reg, val);
+}
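+
+/*
+ * Typical usage pattern (illustrative): writes to otherwise read-only
+ * config registers through the DBI must be bracketed by the helpers above.
+ * For instance, to override the class-code/revision dword (register and
+ * value here are just examples, not taken from any driver):
+ *
+ *	dw_pcie_dbi_ro_wr_en(pci);
+ *	dw_pcie_writel_dbi(pci, PCI_CLASS_REVISION, 0x06040001);
+ *	dw_pcie_dbi_ro_wr_dis(pci);
+ */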
+
+static inline int dw_pcie_start_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->start_link)
+ return pci->ops->start_link(pci);
+
+ return 0;
+}
+
+static inline void dw_pcie_stop_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->stop_link)
+ pci->ops->stop_link(pci);
+}
+
+static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
+{
+ u32 val;
+
+ if (pci->ops && pci->ops->get_ltssm)
+ return pci->ops->get_ltssm(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+
+ return (enum dw_pcie_ltssm)FIELD_GET(PORT_LOGIC_LTSSM_STATE_MASK, val);
+}
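+
+/*
+ * Illustrative sketch only: a caller could poll this helper to wait for the
+ * link to train up to L0. No such loop exists in this header; a real one
+ * would also need a timeout:
+ *
+ *	while (dw_pcie_get_ltssm(pci) != DW_PCIE_LTSSM_L0)
+ *		usleep_range(90, 100);
+ */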
+
+#ifdef CONFIG_PCIE_DW_HOST
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
+int dw_pcie_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+#else
+static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
+{
+ return IRQ_NONE;
+}
+
+static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+{
+ return 0;
+}
+
+static inline int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ return 0;
+}
+
+static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+}
+
+static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
+{
+ return 0;
+}
+static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_PCIE_DW_EP
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
+int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u8 interrupt_num);
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
+#else
+static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+}
+
+static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+ return 0;
+}
+
+static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u8 interrupt_num)
+{
+ return 0;
+}
+
+static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ return 0;
+}
+
+static inline int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep,
+ u8 func_no,
+ u16 interrupt_num)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+}
+
+static inline struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ return NULL;
+}
+#endif
+#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
new file mode 100644
index 000000000..2fe42c700
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Rockchip SoCs.
+ *
+ * Copyright (C) 2021 Rockchip Electronics Co., Ltd.
+ * http://www.rock-chips.com
+ *
+ * Author: Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write
+ * mask for the lower 16 bits.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+#define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val)
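+
+/*
+ * Worked example (values computed from the macros above):
+ * HIWORD_UPDATE(0xf, 0x3) evaluates to 0x000f0003 -- write-enable the low
+ * four bits and set them to 0b0011 -- while HIWORD_UPDATE_BIT(BIT(2))
+ * evaluates to 0x00040004, which write-enables bit 2 and sets it.
+ */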
+
+#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_SMLH_LINKUP BIT(16)
+#define PCIE_RDLH_LINKUP BIT(17)
+#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
+#define PCIE_L0S_ENTRY 0x11
+#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
+#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_CLIENT_LTSSM_STATUS 0x300
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+
+struct rockchip_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct regulator *vpcie3v3;
+ struct irq_domain *irq_domain;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
+ u32 reg)
+{
+ return readl_relaxed(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ unsigned long reg, hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY);
+
+ for_each_set_bit(hwirq, &reg, 4)
+ generic_handle_domain_irq(rockchip->irq_domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void rockchip_intx_mask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ HIWORD_UPDATE_BIT(BIT(data->hwirq)),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static void rockchip_intx_unmask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ HIWORD_DISABLE_BIT(BIT(data->hwirq)),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static struct irq_chip rockchip_intx_irq_chip = {
+ .name = "INTx",
+ .irq_mask = rockchip_intx_mask,
+ .irq_unmask = rockchip_intx_unmask,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rockchip_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ struct device_node *intc;
+
+ intc = of_get_child_by_name(dev->of_node, "legacy-interrupt-controller");
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
+ of_node_put(intc);
+ if (!rockchip->irq_domain) {
+ dev_err(dev, "failed to get a INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CONTROL);
+}
+
+static int rockchip_pcie_link_up(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
+ (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
+ return 1;
+
+ return 0;
+}
+
+static int rockchip_pcie_start_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ /* Reset device */
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 0);
+
+ rockchip_pcie_enable_ltssm(rockchip);
+
+	/*
+	 * PCIe requires the refclk to be stable for 100µs prior to releasing
+	 * PERST#. See table 2-4 in section 2.6.2 AC Specifications of the PCI
+	 * Express Card Electromechanical Specification, 1.1. However, we don't
+	 * know whether the refclk comes from the RC's PHY or an external
+	 * oscillator. If it comes from the RC, enabling the LTSSM is the right
+	 * place to release PERST#. Wait much longer than the bare 100µs, since
+	 * we don't know how long the device actually needs to come out of
+	 * reset.
+	 */
+ msleep(100);
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
+
+ return 0;
+}
+
+static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ struct device *dev = rockchip->pci.dev;
+ u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ int irq, ret;
+
+ irq = of_irq_get_byname(dev->of_node, "legacy");
+ if (irq < 0)
+ return irq;
+
+ ret = rockchip_pcie_init_irq_domain(rockchip);
+ if (ret < 0)
+ dev_err(dev, "failed to init irq domain\n");
+
+ irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler,
+ rockchip);
+
+ /* LTSSM enable control mode */
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
+ .host_init = rockchip_pcie_host_init,
+};
+
+static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (ret < 0)
+ return ret;
+
+ rockchip->clk_cnt = ret;
+
+ return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+}
+
+static int rockchip_pcie_resource_get(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
+
+ rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->rst_gpio))
+ return PTR_ERR(rockchip->rst_gpio);
+
+ rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(rockchip->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
+ return 0;
+}
+
+static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ rockchip->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(rockchip->phy))
+ return dev_err_probe(dev, PTR_ERR(rockchip->phy),
+ "missing PHY\n");
+
+ ret = phy_init(rockchip->phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(rockchip->phy);
+ if (ret)
+ phy_exit(rockchip->phy);
+
+ return ret;
+}
+
+static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
+{
+	/* Power off before exit, mirroring the init sequence in reverse */
+	phy_power_off(rockchip->phy);
+	phy_exit(rockchip->phy);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = rockchip_pcie_link_up,
+ .start_link = rockchip_pcie_start_link,
+};
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip;
+ struct dw_pcie_rp *pp;
+ int ret;
+
+ rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
+ if (!rockchip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rockchip);
+
+ rockchip->pci.dev = dev;
+ rockchip->pci.ops = &dw_pcie_ops;
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+
+ ret = rockchip_pcie_resource_get(pdev, rockchip);
+ if (ret)
+ return ret;
+
+ ret = reset_control_assert(rockchip->rst);
+ if (ret)
+ return ret;
+
+	/* DON'T MOVE ME: the regulator must be enabled before PHY init */
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(rockchip->vpcie3v3),
+ "failed to get vpcie3v3 regulator\n");
+ rockchip->vpcie3v3 = NULL;
+ } else {
+ ret = regulator_enable(rockchip->vpcie3v3);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie3v3 regulator\n");
+ return ret;
+ }
+ }
+
+ ret = rockchip_pcie_phy_init(rockchip);
+ if (ret)
+ goto disable_regulator;
+
+ ret = reset_control_deassert(rockchip->rst);
+ if (ret)
+ goto deinit_phy;
+
+ ret = rockchip_pcie_clk_init(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ ret = dw_pcie_host_init(pp);
+ if (!ret)
+ return 0;
+
+ clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
+deinit_phy:
+ rockchip_pcie_phy_deinit(rockchip);
+disable_regulator:
+ if (rockchip->vpcie3v3)
+ regulator_disable(rockchip->vpcie3v3);
+
+ return ret;
+}
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3568-pcie", },
+ {},
+};
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-dw-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rockchip_pcie_probe,
+};
+builtin_platform_driver(rockchip_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
new file mode 100644
index 000000000..1e9b44b8b
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FU740 DesignWare PCIe Controller integration
+ * Copyright (C) 2019-2021 SiFive, Inc.
+ * Paul Walmsley
+ * Greentime Hu
+ *
+ * Based in part on the i.MX6 PCIe host controller shim which is:
+ *
+ * Copyright (C) 2013 Kosagi
+ * https://www.kosagi.com
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_fu740_pcie(x) dev_get_drvdata((x)->dev)
+
+struct fu740_pcie {
+ struct dw_pcie pci;
+ void __iomem *mgmt_base;
+ struct gpio_desc *reset;
+ struct gpio_desc *pwren;
+ struct clk *pcie_aux;
+ struct reset_control *rst;
+};
+
+#define SIFIVE_DEVICESRESETREG 0x28
+
+#define PCIEX8MGMT_PERST_N 0x0
+#define PCIEX8MGMT_APP_LTSSM_ENABLE 0x10
+#define PCIEX8MGMT_APP_HOLD_PHY_RST 0x18
+#define PCIEX8MGMT_DEVICE_TYPE 0x708
+#define PCIEX8MGMT_PHY0_CR_PARA_ADDR 0x860
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_EN 0x870
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_DATA 0x878
+#define PCIEX8MGMT_PHY0_CR_PARA_SEL 0x880
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_DATA 0x888
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_EN 0x890
+#define PCIEX8MGMT_PHY0_CR_PARA_ACK 0x898
+#define PCIEX8MGMT_PHY1_CR_PARA_ADDR 0x8a0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_EN 0x8b0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_DATA 0x8b8
+#define PCIEX8MGMT_PHY1_CR_PARA_SEL 0x8c0
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_DATA 0x8c8
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_EN 0x8d0
+#define PCIEX8MGMT_PHY1_CR_PARA_ACK 0x8d8
+
+#define PCIEX8MGMT_PHY_CDR_TRACK_EN BIT(0)
+#define PCIEX8MGMT_PHY_LOS_THRSHLD BIT(5)
+#define PCIEX8MGMT_PHY_TERM_EN BIT(9)
+#define PCIEX8MGMT_PHY_TERM_ACDC BIT(10)
+#define PCIEX8MGMT_PHY_EN BIT(11)
+#define PCIEX8MGMT_PHY_INIT_VAL (PCIEX8MGMT_PHY_CDR_TRACK_EN|\
+ PCIEX8MGMT_PHY_LOS_THRSHLD|\
+ PCIEX8MGMT_PHY_TERM_EN|\
+ PCIEX8MGMT_PHY_TERM_ACDC|\
+ PCIEX8MGMT_PHY_EN)
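+
+/*
+ * For reference (computed from the field bits above):
+ * PCIEX8MGMT_PHY_INIT_VAL evaluates to 0xe21.
+ */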
+
+#define PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 0x1008
+#define PCIEX8MGMT_PHY_LANE_OFF 0x100
+#define PCIEX8MGMT_PHY_LANE0_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 0)
+#define PCIEX8MGMT_PHY_LANE1_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 1)
+#define PCIEX8MGMT_PHY_LANE2_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 2)
+#define PCIEX8MGMT_PHY_LANE3_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 3)
+
+static void fu740_pcie_assert_reset(struct fu740_pcie *afp)
+{
+ /* Assert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 0);
+ /* Assert controller PERST_N */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+}
+
+static void fu740_pcie_deassert_reset(struct fu740_pcie *afp)
+{
+ /* Deassert controller PERST_N */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+ /* Deassert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 1);
+}
+
+static void fu740_pcie_power_on(struct fu740_pcie *afp)
+{
+ gpiod_set_value_cansleep(afp->pwren, 1);
+ /*
+ * Ensure that PERST has been asserted for at least 100 ms.
+ * Section 2.2 of PCI Express Card Electromechanical Specification
+ * Revision 3.0
+ */
+ msleep(100);
+}
+
+static void fu740_pcie_drive_reset(struct fu740_pcie *afp)
+{
+ fu740_pcie_assert_reset(afp);
+ fu740_pcie_power_on(afp);
+ fu740_pcie_deassert_reset(afp);
+}
+
+static void fu740_phyregwrite(const uint8_t phy, const uint16_t addr,
+ const uint16_t wrdata, struct fu740_pcie *afp)
+{
+ struct device *dev = afp->pci.dev;
+ void __iomem *phy_cr_para_addr;
+ void __iomem *phy_cr_para_wr_data;
+ void __iomem *phy_cr_para_wr_en;
+ void __iomem *phy_cr_para_ack;
+ int ret, val;
+
+ /* Setup */
+ if (phy) {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ACK;
+ } else {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ACK;
+ }
+
+ writel_relaxed(addr, phy_cr_para_addr);
+ writel_relaxed(wrdata, phy_cr_para_wr_data);
+ writel_relaxed(1, phy_cr_para_wr_en);
+
+ /* Wait for wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for wait_idle state failed!\n");
+
+ /* Clear */
+ writel_relaxed(0, phy_cr_para_wr_en);
+
+ /* Wait for ~wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, !val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for !wait_idle state failed!\n");
+}
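+
+/*
+ * Note: the PHYn_CR_PARA_RD_EN/RD_DATA registers above suggest a symmetric
+ * read path, although this driver never needs one. A hypothetical sketch,
+ * mirroring the write sequence (locals analogous to those in
+ * fu740_phyregwrite()):
+ *
+ *	writel_relaxed(addr, phy_cr_para_addr);
+ *	writel_relaxed(1, phy_cr_para_rd_en);
+ *	readl_poll_timeout(phy_cr_para_ack, val, val, 10, 5000);
+ *	rddata = readl_relaxed(phy_cr_para_rd_data);
+ *	writel_relaxed(0, phy_cr_para_rd_en);
+ *	readl_poll_timeout(phy_cr_para_ack, val, !val, 10, 5000);
+ */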
+
+static void fu740_pcie_init_phy(struct fu740_pcie *afp)
+{
+ /* Enable phy cr_para_sel interfaces */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_SEL);
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_SEL);
+
+ /*
+ * Wait 10 cr_para cycles to guarantee that the registers are ready
+ * to be edited.
+ */
+ ndelay(10);
+
+ /* Set PHY AC termination mode */
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+}
+
+static int fu740_pcie_start_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+ struct fu740_pcie *afp = dev_get_drvdata(dev);
+ u8 cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ int ret;
+ u32 orig, tmp;
+
+ /*
+ * Force 2.5GT/s when starting the link, due to some devices not
+ * probing at higher speeds. This happens with the PCIe switch
+ * on the Unmatched board when U-Boot has not initialised the PCIe.
+ * The fix in U-Boot is to force 2.5GT/s, which then gets cleared
+ * by the soft reset done by this driver.
+ */
+ dev_dbg(dev, "cap_exp at %x\n", cap_exp);
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+ orig = tmp & PCI_EXP_LNKCAP_SLS;
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
+
+ /* Enable LTSSM */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "error: link did not start\n");
+ goto err;
+ }
+
+ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+ if ((tmp & PCI_EXP_LNKCAP_SLS) != orig) {
+ dev_dbg(dev, "changing speed back to original\n");
+
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= orig;
+ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
+
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "error: link did not start at new speed\n");
+ goto err;
+ }
+ }
+
+ ret = 0;
+err:
+ WARN_ON(ret); /* we assume that errors will be very rare */
+ dw_pcie_dbi_ro_wr_dis(pci);
+ return ret;
+}
+
+static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct fu740_pcie *afp = to_fu740_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* Power on reset */
+ fu740_pcie_drive_reset(afp);
+
+ /* Enable pcieauxclk */
+ ret = clk_prepare_enable(afp->pcie_aux);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ return ret;
+ }
+
+ /*
+ * Assert hold_phy_rst (hold the controller LTSSM in reset after
+ * power_up_rst_n for register programming with cr_para)
+ */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+
+ /* Deassert power_up_rst_n */
+ ret = reset_control_deassert(afp->rst);
+ if (ret) {
+ dev_err(dev, "unable to deassert pcie_power_up_rst_n\n");
+ return ret;
+ }
+
+ fu740_pcie_init_phy(afp);
+
+ /* Disable pcieauxclk */
+ clk_disable_unprepare(afp->pcie_aux);
+ /* Clear hold_phy_rst */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+ /* Enable pcieauxclk */
+ clk_prepare_enable(afp->pcie_aux);
+ /* Set RC mode */
+ writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops fu740_pcie_host_ops = {
+ .host_init = fu740_pcie_host_init,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = fu740_pcie_start_link,
+};
+
+static int fu740_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct fu740_pcie *afp;
+
+ afp = devm_kzalloc(dev, sizeof(*afp), GFP_KERNEL);
+ if (!afp)
+ return -ENOMEM;
+ pci = &afp->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &fu740_pcie_host_ops;
+ pci->pp.num_vectors = MAX_MSI_IRQS;
+
+ /* SiFive specific region: mgmt */
+ afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt");
+ if (IS_ERR(afp->mgmt_base))
+ return PTR_ERR(afp->mgmt_base);
+
+ /* Fetch GPIOs */
+ afp->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->reset))
+ return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n");
+
+ afp->pwren = devm_gpiod_get_optional(dev, "pwren", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->pwren))
+ return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n");
+
+ /* Fetch clocks */
+ afp->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(afp->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(afp->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+
+ /* Fetch reset */
+ afp->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(afp->rst))
+ return dev_err_probe(dev, PTR_ERR(afp->rst), "unable to get reset\n");
+
+ platform_set_drvdata(pdev, afp);
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static void fu740_pcie_shutdown(struct platform_device *pdev)
+{
+ struct fu740_pcie *afp = platform_get_drvdata(pdev);
+
+	/* Bring down the link so the bootloader gets a clean state on reboot */
+ fu740_pcie_assert_reset(afp);
+}
+
+static const struct of_device_id fu740_pcie_of_match[] = {
+ { .compatible = "sifive,fu740-pcie", },
+ {},
+};
+
+static struct platform_driver fu740_pcie_driver = {
+ .driver = {
+ .name = "fu740-pcie",
+ .of_match_table = fu740_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fu740_pcie_probe,
+ .shutdown = fu740_pcie_shutdown,
+};
+
+builtin_platform_driver(fu740_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
new file mode 100644
index 000000000..8904b5b85
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for HiSilicon SoCs
+ *
+ * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Zhou Wang <wangzhou1@hisilicon.com>
+ * Dacai Zhu <zhudacai@hisilicon.com>
+ * Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ */
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
+struct hisi_pcie {
+ void __iomem *reg_base;
+};
+
+static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int dev = PCI_SLOT(devfn);
+
+ if (bus->number == cfg->busr.start) {
+ /* access only one slot on each root port */
+ if (dev > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return pci_generic_config_read32(bus, devfn, where,
+ size, val);
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int dev = PCI_SLOT(devfn);
+
+ if (bus->number == cfg->busr.start) {
+ /* access only one slot on each root port */
+ if (dev > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return pci_generic_config_write32(bus, devfn, where,
+ size, val);
+ }
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct hisi_pcie *pcie = cfg->priv;
+
+ if (bus->number == cfg->busr.start)
+ return pcie->reg_base + where;
+ else
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int hisi_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct hisi_pcie *pcie;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct resource *res;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ /*
+ * Retrieve RC base and size from a HISI0081 device with _UID
+ * matching our segment.
+ */
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc base address\n");
+ return -ENOMEM;
+ }
+
+ pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!pcie->reg_base)
+ return -ENOMEM;
+
+ cfg->priv = pcie;
+ return 0;
+}
+
+const struct pci_ecam_ops hisi_pcie_ops = {
+ .init = hisi_pcie_init,
+ .pci_ops = {
+ .map_bus = hisi_pcie_map_bus,
+ .read = hisi_pcie_rd_conf,
+ .write = hisi_pcie_wr_conf,
+ }
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HISI
+
+static int hisi_pcie_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct hisi_pcie *pcie;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "missing \"reg[1]\"property\n");
+ return -EINVAL;
+ }
+
+ pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!pcie->reg_base)
+ return -ENOMEM;
+
+ cfg->priv = pcie;
+ return 0;
+}
+
+static const struct pci_ecam_ops hisi_pcie_platform_ops = {
+ .init = hisi_pcie_platform_init,
+ .pci_ops = {
+ .map_bus = hisi_pcie_map_bus,
+ .read = hisi_pcie_rd_conf,
+ .write = hisi_pcie_wr_conf,
+ }
+};
+
+static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
+ {
+ .compatible = "hisilicon,hip06-pcie-ecam",
+ .data = &hisi_pcie_platform_ops,
+ },
+ {
+ .compatible = "hisilicon,hip07-pcie-ecam",
+ .data = &hisi_pcie_platform_ops,
+ },
+ {},
+};
+
+static struct platform_driver hisi_pcie_almost_ecam_driver = {
+ .probe = pci_host_common_probe,
+ .driver = {
+ .name = "hisi-pcie-almost-ecam",
+ .of_match_table = hisi_pcie_almost_ecam_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(hisi_pcie_almost_ecam_driver);
+
+#endif
+#endif
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
new file mode 100644
index 000000000..fd484cc7c
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for HiSilicon STB SoCs
+ *
+ * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Ruqiang Ju <juruqiang@hisilicon.com>
+ * Jianguo Sun <sunjianguo1@huawei.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_histb_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_SYS_CTRL0 0x0000
+#define PCIE_SYS_CTRL1 0x0004
+#define PCIE_SYS_CTRL7 0x001C
+#define PCIE_SYS_CTRL13 0x0034
+#define PCIE_SYS_CTRL15 0x003C
+#define PCIE_SYS_CTRL16 0x0040
+#define PCIE_SYS_CTRL17 0x0044
+
+#define PCIE_SYS_STAT0 0x0100
+#define PCIE_SYS_STAT4 0x0110
+
+#define PCIE_RDLH_LINK_UP BIT(5)
+#define PCIE_XMLH_LINK_UP BIT(15)
+#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
+#define PCIE_APP_LTSSM_ENABLE BIT(11)
+
+#define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28)
+#define PCIE_WM_EP 0
+#define PCIE_WM_LEGACY BIT(1)
+#define PCIE_WM_RC BIT(30)
+
+#define PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
+#define PCIE_LTSSM_STATE_ACTIVE 0x11
+
+struct histb_pcie {
+ struct dw_pcie *pci;
+ struct clk *aux_clk;
+ struct clk *pipe_clk;
+ struct clk *sys_clk;
+ struct clk *bus_clk;
+ struct phy *phy;
+ struct reset_control *soft_reset;
+ struct reset_control *sys_reset;
+ struct reset_control *bus_reset;
+ void __iomem *ctrl;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vpcie;
+};
+
+static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg)
+{
+ return readl(histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
+{
+ writel(val, histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 val;
+
+ val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ if (enable)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
+}
+
+static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 val;
+
+ val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1);
+ if (enable)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val);
+}
+
+static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ u32 val;
+
+ histb_pcie_dbi_r_mode(&pci->pp, true);
+ dw_pcie_read(base + reg, size, &val);
+ histb_pcie_dbi_r_mode(&pci->pp, false);
+
+ return val;
+}
+
+static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ histb_pcie_dbi_w_mode(&pci->pp, true);
+ dw_pcie_write(base + reg, size, val);
+ histb_pcie_dbi_w_mode(&pci->pp, false);
+}
+
+static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int histb_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops histb_pci_ops = {
+ .read = histb_pcie_rd_own_conf,
+ .write = histb_pcie_wr_own_conf,
+};
+
+static int histb_pcie_link_up(struct dw_pcie *pci)
+{
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+ u32 status;
+
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
+ status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
+ status &= PCIE_LTSSM_STATE_MASK;
+ if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE))
+ return 1;
+
+ return 0;
+}
+
+static int histb_pcie_start_link(struct dw_pcie *pci)
+{
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+
+ /* assert LTSSM enable */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
+ regval |= PCIE_APP_LTSSM_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
+
+ return 0;
+}
+
+static int histb_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+
+ pp->bridge->ops = &histb_pci_ops;
+
+ /* PCIe RC work mode */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ regval &= ~PCIE_DEVICE_TYPE_MASK;
+ regval |= PCIE_WM_RC;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops histb_pcie_host_ops = {
+ .host_init = histb_pcie_host_init,
+};
+
+static void histb_pcie_host_disable(struct histb_pcie *hipcie)
+{
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_assert(hipcie->bus_reset);
+
+ clk_disable_unprepare(hipcie->aux_clk);
+ clk_disable_unprepare(hipcie->pipe_clk);
+ clk_disable_unprepare(hipcie->sys_clk);
+ clk_disable_unprepare(hipcie->bus_clk);
+
+ if (hipcie->reset_gpio)
+ gpiod_set_value_cansleep(hipcie->reset_gpio, 1);
+
+ if (hipcie->vpcie)
+ regulator_disable(hipcie->vpcie);
+}
+
+static int histb_pcie_host_enable(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+	/* Power on the PCIe device, if a regulator is present */
+ if (hipcie->vpcie) {
+ ret = regulator_enable(hipcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (hipcie->reset_gpio)
+ gpiod_set_value_cansleep(hipcie->reset_gpio, 0);
+
+ ret = clk_prepare_enable(hipcie->bus_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable bus clk\n");
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->sys_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable sys clk\n");
+ goto err_sys_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clk\n");
+ goto err_pipe_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clk\n");
+ goto err_aux_clk;
+ }
+
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_deassert(hipcie->soft_reset);
+
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_deassert(hipcie->sys_reset);
+
+ reset_control_assert(hipcie->bus_reset);
+ reset_control_deassert(hipcie->bus_reset);
+
+ return 0;
+
+err_aux_clk:
+ clk_disable_unprepare(hipcie->pipe_clk);
+err_pipe_clk:
+ clk_disable_unprepare(hipcie->sys_clk);
+err_sys_clk:
+ clk_disable_unprepare(hipcie->bus_clk);
+err_bus_clk:
+ if (hipcie->vpcie)
+ regulator_disable(hipcie->vpcie);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .read_dbi = histb_pcie_read_dbi,
+ .write_dbi = histb_pcie_write_dbi,
+ .link_up = histb_pcie_link_up,
+ .start_link = histb_pcie_start_link,
+};
+
+static int histb_pcie_probe(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie;
+ struct dw_pcie *pci;
+ struct dw_pcie_rp *pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
+ if (!hipcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ hipcie->pci = pci;
+ pp = &pci->pp;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ hipcie->ctrl = devm_platform_ioremap_resource_byname(pdev, "control");
+ if (IS_ERR(hipcie->ctrl)) {
+ dev_err(dev, "cannot get control reg base\n");
+ return PTR_ERR(hipcie->ctrl);
+ }
+
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc-dbi");
+ if (IS_ERR(pci->dbi_base)) {
+ dev_err(dev, "cannot get rc-dbi base\n");
+ return PTR_ERR(pci->dbi_base);
+ }
+
+ hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
+ if (IS_ERR(hipcie->vpcie)) {
+ if (PTR_ERR(hipcie->vpcie) != -ENODEV)
+ return PTR_ERR(hipcie->vpcie);
+ hipcie->vpcie = NULL;
+ }
+
+ hipcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(hipcie->reset_gpio);
+ if (ret) {
+ dev_err(dev, "unable to request reset gpio: %d\n", ret);
+ return ret;
+ }
+
+ ret = gpiod_set_consumer_name(hipcie->reset_gpio,
+ "PCIe device power control");
+ if (ret) {
+ dev_err(dev, "unable to set reset gpio name: %d\n", ret);
+ return ret;
+ }
+
+ hipcie->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(hipcie->aux_clk)) {
+ dev_err(dev, "Failed to get PCIe aux clk\n");
+ return PTR_ERR(hipcie->aux_clk);
+ }
+
+ hipcie->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(hipcie->pipe_clk)) {
+ dev_err(dev, "Failed to get PCIe pipe clk\n");
+ return PTR_ERR(hipcie->pipe_clk);
+ }
+
+ hipcie->sys_clk = devm_clk_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_clk)) {
+ dev_err(dev, "Failed to get PCIEe sys clk\n");
+ return PTR_ERR(hipcie->sys_clk);
+ }
+
+ hipcie->bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_clk)) {
+ dev_err(dev, "Failed to get PCIe bus clk\n");
+ return PTR_ERR(hipcie->bus_clk);
+ }
+
+ hipcie->soft_reset = devm_reset_control_get(dev, "soft");
+ if (IS_ERR(hipcie->soft_reset)) {
+ dev_err(dev, "couldn't get soft reset\n");
+ return PTR_ERR(hipcie->soft_reset);
+ }
+
+ hipcie->sys_reset = devm_reset_control_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_reset)) {
+ dev_err(dev, "couldn't get sys reset\n");
+ return PTR_ERR(hipcie->sys_reset);
+ }
+
+ hipcie->bus_reset = devm_reset_control_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_reset)) {
+ dev_err(dev, "couldn't get bus reset\n");
+ return PTR_ERR(hipcie->bus_reset);
+ }
+
+ hipcie->phy = devm_phy_get(dev, "phy");
+ if (IS_ERR(hipcie->phy)) {
+ dev_info(dev, "no pcie-phy found\n");
+ hipcie->phy = NULL;
+		/*
+		 * Fall through here: if no pcie-phy is found, PHY
+		 * initialization must have been done by the bootloader.
+		 */
+ } else {
+ phy_init(hipcie->phy);
+ }
+
+ pp->ops = &histb_pcie_host_ops;
+
+ platform_set_drvdata(pdev, hipcie);
+
+ ret = histb_pcie_host_enable(pp);
+ if (ret) {
+ dev_err(dev, "failed to enable host\n");
+ return ret;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void histb_pcie_remove(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie = platform_get_drvdata(pdev);
+
+ histb_pcie_host_disable(hipcie);
+
+ if (hipcie->phy)
+ phy_exit(hipcie->phy);
+}
+
+static const struct of_device_id histb_pcie_of_match[] = {
+ { .compatible = "hisilicon,hi3798cv200-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
+
+static struct platform_driver histb_pcie_platform_driver = {
+ .probe = histb_pcie_probe,
+ .remove_new = histb_pcie_remove,
+ .driver = {
+ .name = "histb-pcie",
+ .of_match_table = histb_pcie_of_match,
+ },
+};
+module_platform_driver(histb_pcie_platform_driver);
+
+MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
new file mode 100644
index 000000000..c9c93524e
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Intel Gateway SoCs
+ *
+ * Copyright (c) 2019 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci_regs.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reset.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define PORT_AFR_N_FTS_GEN12_DFT (SZ_128 - 1)
+#define PORT_AFR_N_FTS_GEN3 180
+#define PORT_AFR_N_FTS_GEN4 196
+
+/* PCIe Application logic Registers */
+#define PCIE_APP_CCR 0x10
+#define PCIE_APP_CCR_LTSSM_ENABLE BIT(0)
+
+#define PCIE_APP_MSG_CR 0x30
+#define PCIE_APP_MSG_XMT_PM_TURNOFF BIT(0)
+
+#define PCIE_APP_PMC 0x44
+#define PCIE_APP_PMC_IN_L2 BIT(20)
+
+#define PCIE_APP_IRNEN 0xF4
+#define PCIE_APP_IRNCR 0xF8
+#define PCIE_APP_IRN_AER_REPORT BIT(0)
+#define PCIE_APP_IRN_PME BIT(2)
+#define PCIE_APP_IRN_RX_VDM_MSG BIT(4)
+#define PCIE_APP_IRN_PM_TO_ACK BIT(9)
+#define PCIE_APP_IRN_LINK_AUTO_BW_STAT BIT(11)
+#define PCIE_APP_IRN_BW_MGT BIT(12)
+#define PCIE_APP_IRN_INTA BIT(13)
+#define PCIE_APP_IRN_INTB BIT(14)
+#define PCIE_APP_IRN_INTC BIT(15)
+#define PCIE_APP_IRN_INTD BIT(16)
+#define PCIE_APP_IRN_MSG_LTR BIT(18)
+#define PCIE_APP_IRN_SYS_ERR_RC BIT(29)
+#define PCIE_APP_INTX_OFST 12
+
+#define PCIE_APP_IRN_INT \
+ (PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
+ PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
+ PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
+ PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
+ PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
+ PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
+
+#define BUS_IATU_OFFSET SZ_256M
+#define RESET_INTERVAL_MS 100
+
+struct intel_pcie {
+ struct dw_pcie pci;
+ void __iomem *app_base;
+ struct gpio_desc *reset_gpio;
+ u32 rst_intrvl;
+ struct clk *core_clk;
+ struct reset_control *core_rst;
+ struct phy *phy;
+};
+
+static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
+{
+ u32 old;
+
+ old = readl(base + ofs);
+ val = (old & ~mask) | (val & mask);
+
+ if (val != old)
+ writel(val, base + ofs);
+}
+
+static inline void pcie_app_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
+{
+ writel(val, pcie->app_base + ofs);
+}
+
+static void pcie_app_wr_mask(struct intel_pcie *pcie, u32 ofs,
+ u32 mask, u32 val)
+{
+ pcie_update_bits(pcie->app_base, ofs, mask, val);
+}
+
+static inline u32 pcie_rc_cfg_rd(struct intel_pcie *pcie, u32 ofs)
+{
+ return dw_pcie_readl_dbi(&pcie->pci, ofs);
+}
+
+static inline void pcie_rc_cfg_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
+{
+ dw_pcie_writel_dbi(&pcie->pci, ofs, val);
+}
+
+static void pcie_rc_cfg_wr_mask(struct intel_pcie *pcie, u32 ofs,
+ u32 mask, u32 val)
+{
+ pcie_update_bits(pcie->pci.dbi_base, ofs, mask, val);
+}
+
+static void intel_pcie_ltssm_enable(struct intel_pcie *pcie)
+{
+ pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
+ PCIE_APP_CCR_LTSSM_ENABLE);
+}
+
+static void intel_pcie_ltssm_disable(struct intel_pcie *pcie)
+{
+ pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
+}
+
+static void intel_pcie_link_setup(struct intel_pcie *pcie)
+{
+ u32 val;
+ u8 offset = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP);
+
+ val = pcie_rc_cfg_rd(pcie, offset + PCI_EXP_LNKCTL);
+
+ val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
+ pcie_rc_cfg_wr(pcie, offset + PCI_EXP_LNKCTL, val);
+}
+
+static void intel_pcie_init_n_fts(struct dw_pcie *pci)
+{
+ switch (pci->link_gen) {
+ case 3:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
+ break;
+ case 4:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN4;
+ break;
+ default:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN12_DFT;
+ break;
+ }
+ pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
+}
+
+static int intel_pcie_ep_rst_init(struct intel_pcie *pcie)
+{
+ struct device *dev = pcie->pci.dev;
+ int ret;
+
+ pcie->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset_gpio)) {
+ ret = PTR_ERR(pcie->reset_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* Make initial reset last for 100us */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static void intel_pcie_core_rst_assert(struct intel_pcie *pcie)
+{
+ reset_control_assert(pcie->core_rst);
+}
+
+static void intel_pcie_core_rst_deassert(struct intel_pcie *pcie)
+{
+	/*
+	 * A one-microsecond delay to make sure the reset pulse is wide
+	 * enough for a clean core reset.
+	 */
+ udelay(1);
+ reset_control_deassert(pcie->core_rst);
+
+	/*
+	 * On some SoCs the core reset also resets the PHY; extra delay is
+	 * needed to make sure the reset process has completed.
+	 */
+ usleep_range(1000, 2000);
+}
+
+static void intel_pcie_device_rst_assert(struct intel_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+}
+
+static void intel_pcie_device_rst_deassert(struct intel_pcie *pcie)
+{
+ msleep(pcie->rst_intrvl);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+}
+
+static void intel_pcie_core_irq_disable(struct intel_pcie *pcie)
+{
+ pcie_app_wr(pcie, PCIE_APP_IRNEN, 0);
+ pcie_app_wr(pcie, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
+}
+
+static int intel_pcie_get_resources(struct platform_device *pdev)
+{
+ struct intel_pcie *pcie = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ pcie->core_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->core_clk)) {
+ ret = PTR_ERR(pcie->core_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clks: %d\n", ret);
+ return ret;
+ }
+
+ pcie->core_rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(pcie->core_rst)) {
+ ret = PTR_ERR(pcie->core_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get resets: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "reset-assert-ms",
+ &pcie->rst_intrvl);
+ if (ret)
+ pcie->rst_intrvl = RESET_INTERVAL_MS;
+
+ pcie->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(pcie->app_base))
+ return PTR_ERR(pcie->app_base);
+
+ pcie->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_pcie_wait_l2(struct intel_pcie *pcie)
+{
+ u32 value;
+ int ret;
+ struct dw_pcie *pci = &pcie->pci;
+
+ if (pci->link_gen < 3)
+ return 0;
+
+ /* Send PME_TURN_OFF message */
+ pcie_app_wr_mask(pcie, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
+ PCIE_APP_MSG_XMT_PM_TURNOFF);
+
+ /* Read PMC status and wait for falling into L2 link state */
+ ret = readl_poll_timeout(pcie->app_base + PCIE_APP_PMC, value,
+ value & PCIE_APP_PMC_IN_L2, 20,
+ jiffies_to_usecs(5 * HZ));
+ if (ret)
+ dev_err(pcie->pci.dev, "PCIe link enter L2 timeout!\n");
+
+ return ret;
+}
+
+static void intel_pcie_turn_off(struct intel_pcie *pcie)
+{
+ if (dw_pcie_link_up(&pcie->pci))
+ intel_pcie_wait_l2(pcie);
+
+ /* Put endpoint device in reset state */
+ intel_pcie_device_rst_assert(pcie);
+ pcie_rc_cfg_wr_mask(pcie, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
+}
+
+static int intel_pcie_host_setup(struct intel_pcie *pcie)
+{
+ int ret;
+ struct dw_pcie *pci = &pcie->pci;
+
+ intel_pcie_core_rst_assert(pcie);
+ intel_pcie_device_rst_assert(pcie);
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return ret;
+
+ intel_pcie_core_rst_deassert(pcie);
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(pcie->pci.dev, "Core clock enable failed: %d\n", ret);
+ goto clk_err;
+ }
+
+ pci->atu_base = pci->dbi_base + 0xC0000;
+
+ intel_pcie_ltssm_disable(pcie);
+ intel_pcie_link_setup(pcie);
+ intel_pcie_init_n_fts(pci);
+
+ ret = dw_pcie_setup_rc(&pci->pp);
+ if (ret)
+ goto app_init_err;
+
+ dw_pcie_upconfig_setup(pci);
+
+ intel_pcie_device_rst_deassert(pcie);
+ intel_pcie_ltssm_enable(pcie);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto app_init_err;
+
+ /* Enable integrated interrupts */
+ pcie_app_wr_mask(pcie, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
+ PCIE_APP_IRN_INT);
+
+ return 0;
+
+app_init_err:
+ clk_disable_unprepare(pcie->core_clk);
+clk_err:
+ intel_pcie_core_rst_assert(pcie);
+ phy_exit(pcie->phy);
+
+ return ret;
+}
+
+static void __intel_pcie_remove(struct intel_pcie *pcie)
+{
+ intel_pcie_core_irq_disable(pcie);
+ intel_pcie_turn_off(pcie);
+ clk_disable_unprepare(pcie->core_clk);
+ intel_pcie_core_rst_assert(pcie);
+ phy_exit(pcie->phy);
+}
+
+static void intel_pcie_remove(struct platform_device *pdev)
+{
+ struct intel_pcie *pcie = platform_get_drvdata(pdev);
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
+
+ dw_pcie_host_deinit(pp);
+ __intel_pcie_remove(pcie);
+}
+
+static int intel_pcie_suspend_noirq(struct device *dev)
+{
+ struct intel_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ intel_pcie_core_irq_disable(pcie);
+ ret = intel_pcie_wait_l2(pcie);
+ if (ret)
+ return ret;
+
+ phy_exit(pcie->phy);
+ clk_disable_unprepare(pcie->core_clk);
+ return ret;
+}
+
+static int intel_pcie_resume_noirq(struct device *dev)
+{
+ struct intel_pcie *pcie = dev_get_drvdata(dev);
+
+ return intel_pcie_host_setup(pcie);
+}
+
+static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct intel_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ return intel_pcie_host_setup(pcie);
+}
+
+static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
+{
+ return cpu_addr + BUS_IATU_OFFSET;
+}
+
+static const struct dw_pcie_ops intel_pcie_ops = {
+ .cpu_addr_fixup = intel_pcie_cpu_addr,
+};
+
+static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
+ .host_init = intel_pcie_rc_init,
+};
+
+static int intel_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_pcie *pcie;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pcie);
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pp = &pci->pp;
+
+ ret = intel_pcie_get_resources(pdev);
+ if (ret)
+ return ret;
+
+ ret = intel_pcie_ep_rst_init(pcie);
+ if (ret)
+ return ret;
+
+ pci->ops = &intel_pcie_ops;
+ pp->ops = &intel_pcie_dw_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "Cannot initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops intel_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
+ intel_pcie_resume_noirq)
+};
+
+static const struct of_device_id of_intel_pcie_match[] = {
+ { .compatible = "intel,lgm-pcie" },
+ {}
+};
+
+static struct platform_driver intel_pcie_driver = {
+ .probe = intel_pcie_probe,
+ .remove_new = intel_pcie_remove,
+ .driver = {
+ .name = "intel-gw-pcie",
+ .of_match_table = of_intel_pcie_match,
+ .pm = &intel_pcie_pm_ops,
+ },
+};
+builtin_platform_driver(intel_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
new file mode 100644
index 000000000..289bff99d
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe controller driver for Intel Keem Bay
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "pcie-designware.h"
+
+/* PCIE_REGS_APB_SLV Registers */
+#define PCIE_REGS_PCIE_CFG 0x0004
+#define PCIE_DEVICE_TYPE BIT(8)
+#define PCIE_RSTN BIT(0)
+#define PCIE_REGS_PCIE_APP_CNTRL 0x0008
+#define APP_LTSSM_ENABLE BIT(0)
+#define PCIE_REGS_INTERRUPT_ENABLE 0x0028
+#define MSI_CTRL_INT_EN BIT(8)
+#define EDMA_INT_EN GENMASK(7, 0)
+#define PCIE_REGS_INTERRUPT_STATUS 0x002c
+#define MSI_CTRL_INT BIT(8)
+#define PCIE_REGS_PCIE_SII_PM_STATE 0x00b0
+#define SMLH_LINK_UP BIT(19)
+#define RDLH_LINK_UP BIT(8)
+#define PCIE_REGS_PCIE_SII_LINK_UP (SMLH_LINK_UP | RDLH_LINK_UP)
+#define PCIE_REGS_PCIE_PHY_CNTL 0x0164
+#define PHY0_SRAM_BYPASS BIT(8)
+#define PCIE_REGS_PCIE_PHY_STAT 0x0168
+#define PHY0_MPLLA_STATE BIT(1)
+#define PCIE_REGS_LJPLL_STA 0x016c
+#define LJPLL_LOCK BIT(0)
+#define PCIE_REGS_LJPLL_CNTRL_0 0x0170
+#define LJPLL_EN BIT(29)
+#define LJPLL_FOUT_EN GENMASK(24, 21)
+#define PCIE_REGS_LJPLL_CNTRL_2 0x0178
+#define LJPLL_REF_DIV GENMASK(17, 12)
+#define LJPLL_FB_DIV GENMASK(11, 0)
+#define PCIE_REGS_LJPLL_CNTRL_3 0x017c
+#define LJPLL_POST_DIV3A GENMASK(24, 22)
+#define LJPLL_POST_DIV2A GENMASK(18, 16)
+
+#define PERST_DELAY_US 1000
+#define AUX_CLK_RATE_HZ 24000000
+
+struct keembay_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ enum dw_pcie_device_mode mode;
+
+ struct clk *clk_master;
+ struct clk *clk_aux;
+ struct gpio_desc *reset;
+};
+
+struct keembay_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static void keembay_ep_reset_assert(struct keembay_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_ep_reset_deassert(struct keembay_pcie *pcie)
+{
+ /*
+ * Ensure that PERST# is asserted for a minimum of 100ms.
+ *
+ * For more details, refer to PCI Express Card Electromechanical
+ * Specification Revision 1.1, Table 2-4.
+ */
+ msleep(100);
+
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
+{
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+ if (enable)
+ val |= APP_LTSSM_ENABLE;
+ else
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+}
+
+static int keembay_pcie_link_up(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_SII_PM_STATE);
+
+ return (val & PCIE_REGS_PCIE_SII_LINK_UP) == PCIE_REGS_PCIE_SII_LINK_UP;
+}
+
+static int keembay_pcie_start_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+ int ret;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE)
+ return 0;
+
+ keembay_pcie_ltssm_set(pcie, false);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_PCIE_PHY_STAT,
+ val, val & PHY0_MPLLA_STATE, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(pci->dev, "MPLLA is not locked\n");
+ return ret;
+ }
+
+ keembay_pcie_ltssm_set(pcie, true);
+
+ return 0;
+}
+
+static void keembay_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ keembay_pcie_ltssm_set(pcie, false);
+}
+
+static const struct dw_pcie_ops keembay_pcie_ops = {
+ .link_up = keembay_pcie_link_up,
+ .start_link = keembay_pcie_start_link,
+ .stop_link = keembay_pcie_stop_link,
+};
+
+static inline void keembay_pcie_disable_clock(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
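+/*
+ * Get a clock by name, optionally set its rate, enable it, and register
+ * a devm action so the clock is disabled again on driver detach.
+ */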
+static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_add_action_or_reset(dev, keembay_pcie_disable_clock, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+
+static int keembay_pcie_probe_clocks(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+
+ pcie->clk_master = keembay_pcie_probe_clock(dev, "master", 0);
+ if (IS_ERR(pcie->clk_master))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_master),
+ "Failed to enable master clock");
+
+ pcie->clk_aux = keembay_pcie_probe_clock(dev, "aux", AUX_CLK_RATE_HZ);
+ if (IS_ERR(pcie->clk_aux))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_aux),
+ "Failed to enable auxiliary clock");
+
+ return 0;
+}
+
+/*
+ * Initialize the internal PCIe PLL in Host mode.
+ * See the following sections in the Keem Bay data book:
+ * (1) 6.4.6.1 PCIe Subsystem Example Initialization,
+ * (2) 6.8 PCIe Low Jitter PLL for Ref Clk Generation.
+ */
+static int keembay_pcie_pll_init(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+ int ret;
+
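+ /* Program the PLL dividers and output enables per the data book values */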
+ val = FIELD_PREP(LJPLL_REF_DIV, 0) | FIELD_PREP(LJPLL_FB_DIV, 0x32);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_2);
+
+ val = FIELD_PREP(LJPLL_POST_DIV3A, 0x2) |
+ FIELD_PREP(LJPLL_POST_DIV2A, 0x2);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_3);
+
+ val = FIELD_PREP(LJPLL_EN, 0x1) | FIELD_PREP(LJPLL_FOUT_EN, 0xc);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_0);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_LJPLL_STA,
+ val, val & LJPLL_LOCK, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret)
+ dev_err(pci->dev, "Low jitter PLL is not locked\n");
+
+ return ret;
+}
+
+static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 val, mask, status;
+ struct dw_pcie_rp *pp;
+
+ /*
+ * The Keem Bay PCIe controller provides additional IP logic on top of
+ * the standard DWC IP to clear an MSI IRQ by writing '1' to the
+ * respective bit of the status register.
+ *
+ * A chained IRQ handler is therefore used to drive this additional
+ * IP logic.
+ */
+
+ chained_irq_enter(chip, desc);
+
+ pp = &pcie->pci.pp;
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ mask = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ status = val & mask;
+
+ if (status & MSI_CTRL_INT) {
+ dw_handle_msi_irq(pp);
+ writel(status, pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int keembay_pcie_setup_msi_irq(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "pcie");
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, keembay_pcie_msi_irq_handler,
+ pcie);
+
+ return 0;
+}
+
+static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ writel(EDMA_INT_EN, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+}
+
+static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ /* Legacy interrupts are not supported in Keem Bay */
+ dev_err(pci->dev, "Legacy IRQ is not supported\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type %d\n", type);
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features keembay_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .align = SZ_16K,
+};
+
+static const struct pci_epc_features *
+keembay_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &keembay_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
+ .ep_init = keembay_pcie_ep_init,
+ .raise_irq = keembay_pcie_ep_raise_irq,
+ .get_features = keembay_pcie_get_features,
+};
+
+static const struct dw_pcie_host_ops keembay_pcie_host_ops = {
+};
+
+static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ u32 val;
+ int ret;
+
+ pp->ops = &keembay_pcie_host_ops;
+ pp->msi_irq[0] = -ENODEV;
+
+ ret = keembay_pcie_setup_msi_irq(pcie);
+ if (ret)
+ return ret;
+
+ pcie->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ ret = keembay_pcie_probe_clocks(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+ val |= PHY0_SRAM_BYPASS;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+
+ writel(PCIE_DEVICE_TYPE, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+
+ ret = keembay_pcie_pll_init(pcie);
+ if (ret)
+ return ret;
+
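+ /* Bring the controller out of reset, then deassert PERST# to the device */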
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ writel(val | PCIE_RSTN, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ keembay_ep_reset_deassert(pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ keembay_ep_reset_assert(pcie);
+ dev_err(dev, "Failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ val |= MSI_CTRL_INT_EN;
+ writel(val, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static int keembay_pcie_probe(struct platform_device *pdev)
+{
+ const struct keembay_pcie_of_data *data;
+ struct device *dev = &pdev->dev;
+ struct keembay_pcie *pcie;
+ struct dw_pcie *pci;
+ enum dw_pcie_device_mode mode;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &keembay_pcie_ops;
+
+ pcie->mode = mode;
+
+ pcie->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(pcie->apb_base))
+ return PTR_ERR(pcie->apb_base);
+
+ platform_set_drvdata(pdev, pcie);
+
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_HOST))
+ return -ENODEV;
+
+ return keembay_pcie_add_pcie_port(pcie, pdev);
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_EP))
+ return -ENODEV;
+
+ pci->ep.ops = &keembay_pcie_ep_ops;
+ return dw_pcie_ep_init(&pci->ep);
+ default:
+ dev_err(dev, "Invalid device type %d\n", pcie->mode);
+ return -ENODEV;
+ }
+}
+
+static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct keembay_pcie_of_data keembay_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id keembay_pcie_of_match[] = {
+ {
+ .compatible = "intel,keembay-pcie",
+ .data = &keembay_pcie_rc_of_data,
+ },
+ {
+ .compatible = "intel,keembay-pcie-ep",
+ .data = &keembay_pcie_ep_of_data,
+ },
+ {}
+};
+
+static struct platform_driver keembay_pcie_driver = {
+ .driver = {
+ .name = "keembay-pcie",
+ .of_match_table = keembay_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = keembay_pcie_probe,
+};
+builtin_platform_driver(keembay_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
new file mode 100644
index 000000000..2ee146767
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Kirin Phone SoCs
+ *
+ * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
+ * https://www.huawei.com
+ *
+ * Author: Xiaowei Song <songxiaowei@huawei.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/phy/phy.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+
+#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)
+
+/* PCIe ELBI registers */
+#define SOC_PCIECTRL_CTRL0_ADDR 0x000
+#define SOC_PCIECTRL_CTRL1_ADDR 0x004
+#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
+
+/* info located in APB */
+#define PCIE_APP_LTSSM_ENABLE 0x01c
+#define PCIE_APB_PHY_STATUS0 0x400
+#define PCIE_LINKUP_ENABLE (0x8020)
+#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)
+
+/* info located in sysctrl */
+#define SCTRL_PCIE_CMOS_OFFSET 0x60
+#define SCTRL_PCIE_CMOS_BIT 0x10
+#define SCTRL_PCIE_ISO_OFFSET 0x44
+#define SCTRL_PCIE_ISO_BIT 0x30
+#define SCTRL_PCIE_HPCLK_OFFSET 0x190
+#define SCTRL_PCIE_HPCLK_BIT 0x184000
+#define SCTRL_PCIE_OE_OFFSET 0x14a
+#define PCIE_DEBOUNCE_PARAM 0xF0F400
+#define PCIE_OE_BYPASS (0x3 << 28)
+
+/*
+ * Max number of connected PCI slots at an external PCI bridge
+ *
+ * This is used on HiKey 970, which has a PEX 8606 bridge with 4 connected
+ * lanes: lane 0 is the upstream lane, while of the other three lanes one
+ * is connected to an on-board Ethernet adapter and the other two to M.2
+ * and mini PCI slots.
+ *
+ * Each slot has a different clock source and uses a separate PERST# pin.
+ */
+#define MAX_PCI_SLOTS 3
+
+enum pcie_kirin_phy_type {
+ PCIE_KIRIN_INTERNAL_PHY,
+ PCIE_KIRIN_EXTERNAL_PHY
+};
+
+struct kirin_pcie {
+ enum pcie_kirin_phy_type type;
+
+ struct dw_pcie *pci;
+ struct regmap *apb;
+ struct phy *phy;
+ void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */
+
+ /* DWC PERST# */
+ int gpio_id_dwc_perst;
+
+ /* Per-slot PERST# */
+ int num_slots;
+ int gpio_id_reset[MAX_PCI_SLOTS];
+ const char *reset_names[MAX_PCI_SLOTS];
+
+ /* Per-slot clkreq */
+ int n_gpio_clkreq;
+ int gpio_id_clkreq[MAX_PCI_SLOTS];
+ const char *clkreq_names[MAX_PCI_SLOTS];
+};
+
+/*
+ * Kirin 960 PHY. Can't be split into a PHY driver without changing the
+ * DT schema.
+ */
+
+#define REF_CLK_FREQ 100000000
+
+/* PHY info located in APB */
+#define PCIE_APB_PHY_CTRL0 0x0
+#define PCIE_APB_PHY_CTRL1 0x4
+#define PCIE_APB_PHY_STATUS0 0x400
+#define PIPE_CLK_STABLE BIT(19)
+#define PHY_REF_PAD_BIT BIT(8)
+#define PHY_PWR_DOWN_BIT BIT(22)
+#define PHY_RST_ACK_BIT BIT(16)
+
+/* peri_crg ctrl */
+#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
+#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000
+
+/* Delay times, in microseconds */
+#define REF_2_PERST_MIN 21000
+#define REF_2_PERST_MAX 25000
+#define PERST_2_ACCESS_MIN 10000
+#define PERST_2_ACCESS_MAX 12000
+#define PIPE_CLK_WAIT_MIN 550
+#define PIPE_CLK_WAIT_MAX 600
+#define TIME_CMOS_MIN 100
+#define TIME_CMOS_MAX 105
+#define TIME_PHY_PD_MIN 10
+#define TIME_PHY_PD_MAX 11
+
+struct hi3660_pcie_phy {
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *crgctrl;
+ struct regmap *sysctrl;
+ struct clk *apb_sys_clk;
+ struct clk *apb_phy_clk;
+ struct clk *phy_ref_clk;
+ struct clk *aclk;
+ struct clk *aux_clk;
+};
+
+/* Registers in PCIePHY */
+static inline void kirin_apb_phy_writel(struct hi3660_pcie_phy *hi3660_pcie_phy,
+ u32 val, u32 reg)
+{
+ writel(val, hi3660_pcie_phy->base + reg);
+}
+
+static inline u32 kirin_apb_phy_readl(struct hi3660_pcie_phy *hi3660_pcie_phy,
+ u32 reg)
+{
+ return readl(hi3660_pcie_phy->base + reg);
+}
+
+static int hi3660_pcie_phy_get_clk(struct hi3660_pcie_phy *phy)
+{
+ struct device *dev = phy->dev;
+
+ phy->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
+ if (IS_ERR(phy->phy_ref_clk))
+ return PTR_ERR(phy->phy_ref_clk);
+
+ phy->aux_clk = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(phy->aux_clk))
+ return PTR_ERR(phy->aux_clk);
+
+ phy->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
+ if (IS_ERR(phy->apb_phy_clk))
+ return PTR_ERR(phy->apb_phy_clk);
+
+ phy->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
+ if (IS_ERR(phy->apb_sys_clk))
+ return PTR_ERR(phy->apb_sys_clk);
+
+ phy->aclk = devm_clk_get(dev, "pcie_aclk");
+ if (IS_ERR(phy->aclk))
+ return PTR_ERR(phy->aclk);
+
+ return 0;
+}
+
+static int hi3660_pcie_phy_get_resource(struct hi3660_pcie_phy *phy)
+{
+ struct device *dev = phy->dev;
+ struct platform_device *pdev;
+
+ /* registers */
+ pdev = container_of(dev, struct platform_device, dev);
+
+ phy->base = devm_platform_ioremap_resource_byname(pdev, "phy");
+ if (IS_ERR(phy->base))
+ return PTR_ERR(phy->base);
+
+ phy->crgctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
+ if (IS_ERR(phy->crgctrl))
+ return PTR_ERR(phy->crgctrl);
+
+ phy->sysctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
+ if (IS_ERR(phy->sysctrl))
+ return PTR_ERR(phy->sysctrl);
+
+ return 0;
+}
+
+static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
+{
+ struct device *dev = phy->dev;
+ u32 reg_val;
+
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1);
+ reg_val &= ~PHY_REF_PAD_BIT;
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1);
+
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL0);
+ reg_val &= ~PHY_PWR_DOWN_BIT;
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL0);
+ usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);
+
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1);
+ reg_val &= ~PHY_RST_ACK_BIT;
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1);
+
+ usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
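+ /* A set PIPE_CLK_STABLE bit indicates the PIPE clock is not stable */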
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
+ if (reg_val & PIPE_CLK_STABLE) {
+ dev_err(dev, "PIPE clk is not stable\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hi3660_pcie_phy_oe_enable(struct hi3660_pcie_phy *phy)
+{
+ u32 val;
+
+ regmap_read(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
+ val |= PCIE_DEBOUNCE_PARAM;
+ val &= ~PCIE_OE_BYPASS;
+ regmap_write(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
+}
+
+static int hi3660_pcie_phy_clk_ctrl(struct hi3660_pcie_phy *phy, bool enable)
+{
+ int ret = 0;
+
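+ /* The disable path reuses the error unwind sequence below */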
+ if (!enable)
+ goto close_clk;
+
+ ret = clk_set_rate(phy->phy_ref_clk, REF_CLK_FREQ);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy->phy_ref_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy->apb_sys_clk);
+ if (ret)
+ goto apb_sys_fail;
+
+ ret = clk_prepare_enable(phy->apb_phy_clk);
+ if (ret)
+ goto apb_phy_fail;
+
+ ret = clk_prepare_enable(phy->aclk);
+ if (ret)
+ goto aclk_fail;
+
+ ret = clk_prepare_enable(phy->aux_clk);
+ if (ret)
+ goto aux_clk_fail;
+
+ return 0;
+
+close_clk:
+ clk_disable_unprepare(phy->aux_clk);
+aux_clk_fail:
+ clk_disable_unprepare(phy->aclk);
+aclk_fail:
+ clk_disable_unprepare(phy->apb_phy_clk);
+apb_phy_fail:
+ clk_disable_unprepare(phy->apb_sys_clk);
+apb_sys_fail:
+ clk_disable_unprepare(phy->phy_ref_clk);
+
+ return ret;
+}
+
+static int hi3660_pcie_phy_power_on(struct kirin_pcie *pcie)
+{
+ struct hi3660_pcie_phy *phy = pcie->phy_priv;
+ int ret;
+
+ /* Power supply for Host */
+ regmap_write(phy->sysctrl,
+ SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
+ usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
+
+ hi3660_pcie_phy_oe_enable(phy);
+
+ ret = hi3660_pcie_phy_clk_ctrl(phy, true);
+ if (ret)
+ return ret;
+
+ /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
+ regmap_write(phy->sysctrl,
+ SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
+ regmap_write(phy->crgctrl,
+ CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
+ regmap_write(phy->sysctrl,
+ SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);
+
+ ret = hi3660_pcie_phy_start(phy);
+ if (ret)
+ goto disable_clks;
+
+ return 0;
+
+disable_clks:
+ hi3660_pcie_phy_clk_ctrl(phy, false);
+ return ret;
+}
+
+static int hi3660_pcie_phy_init(struct platform_device *pdev,
+ struct kirin_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+ struct hi3660_pcie_phy *phy;
+ int ret;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ pcie->phy_priv = phy;
+ phy->dev = dev;
+
+ ret = hi3660_pcie_phy_get_clk(phy);
+ if (ret)
+ return ret;
+
+ return hi3660_pcie_phy_get_resource(phy);
+}
+
+static int hi3660_pcie_phy_power_off(struct kirin_pcie *pcie)
+{
+ struct hi3660_pcie_phy *phy = pcie->phy_priv;
+
+ /* Drop power supply for Host */
+ regmap_write(phy->sysctrl, SCTRL_PCIE_CMOS_OFFSET, 0x00);
+
+ hi3660_pcie_phy_clk_ctrl(phy, false);
+
+ return 0;
+}
+
+/*
+ * The non-PHY part starts here
+ */
+
+static const struct regmap_config pcie_kirin_regmap_conf = {
+ .name = "kirin_pcie_apb",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ char name[32];
+ int ret, i;
+
+ /* This is an optional property */
+ ret = gpiod_count(dev, "hisilicon,clken");
+ if (ret < 0)
+ return 0;
+
+ if (ret > MAX_PCI_SLOTS) {
+ dev_err(dev, "Too many GPIO clock requests!\n");
+ return -EINVAL;
+ }
+
+ pcie->n_gpio_clkreq = ret;
+
+ for (i = 0; i < pcie->n_gpio_clkreq; i++) {
+ pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node,
+ "hisilicon,clken-gpios", i);
+ if (pcie->gpio_id_clkreq[i] < 0)
+ return pcie->gpio_id_clkreq[i];
+
+ sprintf(name, "pcie_clkreq_%d", i);
+ pcie->clkreq_names[i] = devm_kstrdup_const(dev, name,
+ GFP_KERNEL);
+ if (!pcie->clkreq_names[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
+ struct platform_device *pdev,
+ struct device_node *node)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent, *child;
+ int ret, slot, i;
+ char name[32];
+
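+ /* Walk the bridge's child buses and record one PERST# GPIO per slot */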
+ for_each_available_child_of_node(node, parent) {
+ for_each_available_child_of_node(parent, child) {
+ i = pcie->num_slots;
+
+ pcie->gpio_id_reset[i] = of_get_named_gpio(child,
+ "reset-gpios", 0);
+ if (pcie->gpio_id_reset[i] < 0)
+ continue;
+
+ pcie->num_slots++;
+ if (pcie->num_slots > MAX_PCI_SLOTS) {
+ dev_err(dev, "Too many PCI slots!\n");
+ ret = -EINVAL;
+ goto put_node;
+ }
+
+ ret = of_pci_get_devfn(child);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse devfn: %d\n", ret);
+ goto put_node;
+ }
+
+ slot = PCI_SLOT(ret);
+
+ sprintf(name, "pcie_perst_%d", slot);
+ pcie->reset_names[i] = devm_kstrdup_const(dev, name,
+ GFP_KERNEL);
+ if (!pcie->reset_names[i]) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
+ }
+ }
+
+ return 0;
+
+put_node:
+ of_node_put(child);
+ of_node_put(parent);
+ return ret;
+}
+
+static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *node = dev->of_node;
+ void __iomem *apb_base;
+ int ret;
+
+ apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(apb_base))
+ return PTR_ERR(apb_base);
+
+ kirin_pcie->apb = devm_regmap_init_mmio(dev, apb_base,
+ &pcie_kirin_regmap_conf);
+ if (IS_ERR(kirin_pcie->apb))
+ return PTR_ERR(kirin_pcie->apb);
+
+ /* pcie internal PERST# gpio */
+ kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node,
+ "reset-gpios", 0);
+ if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) {
+ dev_err(dev, "unable to get a valid gpio pin\n");
+ return -ENODEV;
+ }
+
+ ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
+ if (ret)
+ return ret;
+
+ /* Parse OF children */
+ for_each_available_child_of_node(node, child) {
+ ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
+ if (ret)
+ goto put_node;
+ }
+
+ return 0;
+
+put_node:
+ of_node_put(child);
+ return ret;
+}
+
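+/*
+ * DBI accesses must be bracketed by toggling the ELBI "slave DBI enable"
+ * bit in the APB control registers; see kirin_pcie_read_dbi()/write_dbi().
+ */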
+static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
+ bool on)
+{
+ u32 val;
+
+ regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, &val);
+ if (on)
+ val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+ regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, val);
+}
+
+static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
+ bool on)
+{
+ u32 val;
+
+ regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, &val);
+ if (on)
+ val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+ regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, val);
+}
+
+static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int kirin_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int kirin_pcie_add_bus(struct pci_bus *bus)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ int i, ret;
+
+ if (!kirin_pcie->num_slots)
+ return 0;
+
+ /* Send PERST# to each slot */
+ for (i = 0; i < kirin_pcie->num_slots; i++) {
+ ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1);
+ if (ret) {
+ dev_err(pci->dev, "PERST# %s error: %d\n",
+ kirin_pcie->reset_names[i], ret);
+ }
+ }
+ usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+ return 0;
+}
+
+static struct pci_ops kirin_pci_ops = {
+ .read = kirin_pcie_rd_own_conf,
+ .write = kirin_pcie_wr_own_conf,
+ .add_bus = kirin_pcie_add_bus,
+};
+
+static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ u32 ret;
+
+ kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
+ dw_pcie_read(base + reg, size, &ret);
+ kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+
+ return ret;
+}
+
+static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+
+ kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
+ dw_pcie_write(base + reg, size, val);
+ kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+}
+
+static int kirin_pcie_link_up(struct dw_pcie *pci)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ u32 val;
+
+ regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val);
+ if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
+ return 1;
+
+ return 0;
+}
+
+static int kirin_pcie_start_link(struct dw_pcie *pci)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+
+ /* assert LTSSM enable */
+ regmap_write(kirin_pcie->apb, PCIE_APP_LTSSM_ENABLE,
+ PCIE_LTSSM_ENABLE_BIT);
+
+ return 0;
+}
+
+static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ pp->bridge->ops = &kirin_pci_ops;
+
+ return 0;
+}
+
+static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie,
+ struct device *dev)
+{
+ int ret, i;
+
+ for (i = 0; i < kirin_pcie->num_slots; i++) {
+ if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) {
+ dev_err(dev, "unable to get a valid %s gpio\n",
+ kirin_pcie->reset_names[i]);
+ return -ENODEV;
+ }
+
+ ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i],
+ kirin_pcie->reset_names[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) {
+ if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) {
+ dev_err(dev, "unable to get a valid %s gpio\n",
+ kirin_pcie->clkreq_names[i]);
+ return -ENODEV;
+ }
+
+ ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i],
+ kirin_pcie->clkreq_names[i]);
+ if (ret)
+ return ret;
+
+ ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops kirin_dw_pcie_ops = {
+ .read_dbi = kirin_pcie_read_dbi,
+ .write_dbi = kirin_pcie_write_dbi,
+ .link_up = kirin_pcie_link_up,
+ .start_link = kirin_pcie_start_link,
+};
+
+static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
+ .host_init = kirin_pcie_host_init,
+};
+
+static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
+{
+ int i;
+
+ if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY)
+ return hi3660_pcie_phy_power_off(kirin_pcie);
+
+ for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
+ gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1);
+
+ phy_power_off(kirin_pcie->phy);
+ phy_exit(kirin_pcie->phy);
+
+ return 0;
+}
+
+static int kirin_pcie_power_on(struct platform_device *pdev,
+ struct kirin_pcie *kirin_pcie)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY) {
+ ret = hi3660_pcie_phy_init(pdev, kirin_pcie);
+ if (ret)
+ return ret;
+
+ ret = hi3660_pcie_phy_power_on(kirin_pcie);
+ if (ret)
+ return ret;
+ } else {
+ kirin_pcie->phy = devm_of_phy_get(dev, dev->of_node, NULL);
+ if (IS_ERR(kirin_pcie->phy))
+ return PTR_ERR(kirin_pcie->phy);
+
+ ret = kirin_pcie_gpio_request(kirin_pcie, dev);
+ if (ret)
+ return ret;
+
+ ret = phy_init(kirin_pcie->phy);
+ if (ret)
+ goto err;
+
+ ret = phy_power_on(kirin_pcie->phy);
+ if (ret)
+ goto err;
+ }
+
+ /* Wait for the reference clock to settle, then release PERST# on the endpoint */
+ usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
+
+ if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) {
+ ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1);
+ if (ret)
+ goto err;
+ }
+
+ usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+ return 0;
+err:
+ kirin_pcie_power_off(kirin_pcie);
+
+ return ret;
+}
+
+static int kirin_pcie_remove(struct platform_device *pdev)
+{
+ struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
+
+ dw_pcie_host_deinit(&kirin_pcie->pci->pp);
+
+ kirin_pcie_power_off(kirin_pcie);
+
+ return 0;
+}
+
+struct kirin_pcie_data {
+ enum pcie_kirin_phy_type phy_type;
+};
+
+static const struct kirin_pcie_data kirin_960_data = {
+ .phy_type = PCIE_KIRIN_INTERNAL_PHY,
+};
+
+static const struct kirin_pcie_data kirin_970_data = {
+ .phy_type = PCIE_KIRIN_EXTERNAL_PHY,
+};
+
+static const struct of_device_id kirin_pcie_match[] = {
+ { .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data },
+ { .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data },
+ {},
+};
+
+static int kirin_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct kirin_pcie_data *data;
+ struct kirin_pcie *kirin_pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ if (!dev->of_node) {
+ dev_err(dev, "NULL node\n");
+ return -EINVAL;
+ }
+
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+
+ kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
+ if (!kirin_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &kirin_dw_pcie_ops;
+ pci->pp.ops = &kirin_pcie_host_ops;
+ kirin_pcie->pci = pci;
+ kirin_pcie->type = data->phy_type;
+
+ ret = kirin_pcie_get_resource(kirin_pcie, pdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, kirin_pcie);
+
+ ret = kirin_pcie_power_on(pdev, kirin_pcie);
+ if (ret)
+ return ret;
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static struct platform_driver kirin_pcie_driver = {
+ .probe = kirin_pcie_probe,
+ .remove = kirin_pcie_remove,
+ .driver = {
+ .name = "kirin-pcie",
+ .of_match_table = kirin_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(kirin_pcie_driver);
+
+MODULE_DEVICE_TABLE(of, kirin_pcie_match);
+MODULE_DESCRIPTION("PCIe host controller driver for Kirin Phone SoCs");
+MODULE_AUTHOR("Xiaowei Song <songxiaowei@huawei.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
new file mode 100644
index 000000000..9b62ee699
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe Endpoint controller driver
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Author: Siddartha Mohanadoss <smohanad@codeaurora.org>
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/pcie.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/module.h>
+
+#include "pcie-designware.h"
+
+/* PARF registers */
+#define PARF_SYS_CTRL 0x00
+#define PARF_DB_CTRL 0x10
+#define PARF_PM_CTRL 0x20
+#define PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PARF_MHI_BASE_ADDR_LOWER 0x178
+#define PARF_MHI_BASE_ADDR_UPPER 0x17c
+#define PARF_DEBUG_INT_EN 0x190
+#define PARF_AXI_MSTR_RD_HALT_NO_WRITES 0x1a4
+#define PARF_AXI_MSTR_WR_ADDR_HALT 0x1a8
+#define PARF_Q2A_FLUSH 0x1ac
+#define PARF_LTSSM 0x1b0
+#define PARF_CFG_BITS 0x210
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
+#define PARF_SLV_ADDR_MSB_CTRL 0x2c0
+#define PARF_DBI_BASE_ADDR 0x350
+#define PARF_DBI_BASE_ADDR_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
+#define PARF_SRIS_MODE 0x644
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
+#define PARF_DEVICE_TYPE 0x1000
+#define PARF_BDF_TO_SID_CFG 0x2c00
+
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_DOWN BIT(1)
+#define PARF_INT_ALL_BME BIT(2)
+#define PARF_INT_ALL_PM_TURNOFF BIT(3)
+#define PARF_INT_ALL_DEBUG BIT(4)
+#define PARF_INT_ALL_LTR BIT(5)
+#define PARF_INT_ALL_MHI_Q6 BIT(6)
+#define PARF_INT_ALL_MHI_A7 BIT(7)
+#define PARF_INT_ALL_DSTATE_CHANGE BIT(8)
+#define PARF_INT_ALL_L1SUB_TIMEOUT BIT(9)
+#define PARF_INT_ALL_MMIO_WRITE BIT(10)
+#define PARF_INT_ALL_CFG_WRITE BIT(11)
+#define PARF_INT_ALL_BRIDGE_FLUSH_N BIT(12)
+#define PARF_INT_ALL_LINK_UP BIT(13)
+#define PARF_INT_ALL_AER_LEGACY BIT(14)
+#define PARF_INT_ALL_PLS_ERR BIT(15)
+#define PARF_INT_ALL_PME_LEGACY BIT(16)
+#define PARF_INT_ALL_PLS_PME BIT(17)
+#define PARF_INT_ALL_EDMA BIT(22)
+
+/* PARF_BDF_TO_SID_CFG register fields */
+#define PARF_BDF_TO_SID_BYPASS BIT(0)
+
+/* PARF_DEBUG_INT_EN register fields */
+#define PARF_DEBUG_INT_PM_DSTATE_CHANGE BIT(1)
+#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
+#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
+
+/* PARF_DEVICE_TYPE register fields */
+#define PARF_DEVICE_TYPE_EP 0x0
+
+/* PARF_PM_CTRL register fields */
+#define PARF_PM_CTRL_REQ_EXIT_L1 BIT(1)
+#define PARF_PM_CTRL_READY_ENTR_L23 BIT(2)
+#define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5)
+
+/* PARF_MHI_CLOCK_RESET_CTRL fields */
+#define PARF_MSTR_AXI_CLK_EN BIT(1)
+
+/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
+#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0)
+
+/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
+#define PARF_AXI_MSTR_WR_ADDR_HALT_EN BIT(31)
+
+/* PARF_Q2A_FLUSH register fields */
+#define PARF_Q2A_FLUSH_EN BIT(16)
+
+/* PARF_SYS_CTRL register fields */
+#define PARF_SYS_CTRL_AUX_PWR_DET BIT(4)
+#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6)
+#define PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS BIT(10)
+#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11)
+
+/* PARF_DB_CTRL register fields */
+#define PARF_DB_CTRL_INSR_DBNCR_BLOCK BIT(0)
+#define PARF_DB_CTRL_RMVL_DBNCR_BLOCK BIT(1)
+#define PARF_DB_CTRL_DBI_WKP_BLOCK BIT(4)
+#define PARF_DB_CTRL_SLV_WKP_BLOCK BIT(5)
+#define PARF_DB_CTRL_MST_WKP_BLOCK BIT(6)
+
+/* PARF_CFG_BITS register fields */
+#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1)
+
+/* ELBI registers */
+#define ELBI_SYS_STTS 0x08
+#define ELBI_CS2_ENABLE 0xa4
+
+/* DBI registers */
+#define DBI_CON_STATUS 0x44
+
+/* DBI register fields */
+#define DBI_CON_STATUS_POWER_STATE_MASK GENMASK(1, 0)
+
+#define XMLH_LINK_UP 0x400
+#define CORE_RESET_TIME_US_MIN 1000
+#define CORE_RESET_TIME_US_MAX 1005
+#define WAKE_DELAY_US 2000 /* 2 ms */
+
+#define PCIE_GEN1_BW_MBPS 250
+#define PCIE_GEN2_BW_MBPS 500
+#define PCIE_GEN3_BW_MBPS 985
+#define PCIE_GEN4_BW_MBPS 1969
+
+#define to_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+enum qcom_pcie_ep_link_status {
+ QCOM_PCIE_EP_LINK_DISABLED,
+ QCOM_PCIE_EP_LINK_ENABLED,
+ QCOM_PCIE_EP_LINK_UP,
+ QCOM_PCIE_EP_LINK_DOWN,
+};
+
+/**
+ * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
+ * @pci: Designware PCIe controller struct
+ * @parf: Qualcomm PCIe specific PARF register base
+ * @elbi: Designware PCIe specific ELBI register base
+ * @mmio: MMIO register base
+ * @perst_map: PERST regmap
+ * @mmio_res: MMIO region resource
+ * @core_reset: PCIe Endpoint core reset
+ * @reset: PERST# GPIO
+ * @wake: WAKE# GPIO
+ * @phy: PHY controller block
+ * @debugfs: PCIe Endpoint Debugfs directory
+ * @icc_mem: Handle to an interconnect path between PCIe and MEM
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count
+ * @perst_en: Flag for PERST enable
+ * @perst_sep_en: Flag for PERST separation enable
+ * @link_status: PCIe Link status
+ * @global_irq: Qualcomm PCIe specific Global IRQ
+ * @perst_irq: PERST# IRQ
+ */
+struct qcom_pcie_ep {
+ struct dw_pcie pci;
+
+ void __iomem *parf;
+ void __iomem *elbi;
+ void __iomem *mmio;
+ struct regmap *perst_map;
+ struct resource *mmio_res;
+
+ struct reset_control *core_reset;
+ struct gpio_desc *reset;
+ struct gpio_desc *wake;
+ struct phy *phy;
+ struct dentry *debugfs;
+
+ struct icc_path *icc_mem;
+
+ struct clk_bulk_data *clks;
+ int num_clks;
+
+ u32 perst_en;
+ u32 perst_sep_en;
+
+ enum qcom_pcie_ep_link_status link_status;
+ int global_irq;
+ int perst_irq;
+};
+
+static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_assert(pcie_ep->core_reset);
+ if (ret) {
+ dev_err(dev, "Cannot assert core reset\n");
+ return ret;
+ }
+
+ usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);
+
+ ret = reset_control_deassert(pcie_ep->core_reset);
+ if (ret) {
+ dev_err(dev, "Cannot de-assert core reset\n");
+ return ret;
+ }
+
+ usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);
+
+ return 0;
+}
+
+/*
+ * Delatch PERST_EN and PERST_SEPARATION_ENABLE through the TCSR so that
+ * the device is not reset during host reboot and hibernation; the driver
+ * is expected to handle this situation itself.
+ */
+static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
+{
+ if (pcie_ep->perst_map) {
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
+ }
+}
+
+static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ u32 reg;
+
+ reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);
+
+ return reg & XMLH_LINK_UP;
+}
+
+static int qcom_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ enable_irq(pcie_ep->perst_irq);
+
+ return 0;
+}
+
+static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ disable_irq(pcie_ep->perst_irq);
+}
+
+static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ int ret;
+
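+ /* DBI2 (shadow) registers are only writable while CS2 is enabled */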
+ writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
+
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
+
+ writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
+}
+
+static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ u32 offset, status, bw;
+ int speed, width;
+ int ret;
+
+ if (!pcie_ep->icc_mem)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
+
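+ /* Map the negotiated link speed (Gen 1-4) to per-lane bandwidth */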
+ switch (speed) {
+ case 1:
+ bw = MBps_to_icc(PCIE_GEN1_BW_MBPS);
+ break;
+ case 2:
+ bw = MBps_to_icc(PCIE_GEN2_BW_MBPS);
+ break;
+ case 3:
+ bw = MBps_to_icc(PCIE_GEN3_BW_MBPS);
+ break;
+ default:
+ dev_warn(pci->dev, "using default GEN4 bandwidth\n");
+ fallthrough;
+ case 4:
+ bw = MBps_to_icc(PCIE_GEN4_BW_MBPS);
+ break;
+ }
+
+ ret = icc_set_bw(pcie_ep->icc_mem, 0, width * bw);
+ if (ret)
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+}
+
+static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks);
+ if (ret)
+ return ret;
+
+ ret = qcom_pcie_ep_core_reset(pcie_ep);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_init(pcie_ep->phy);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_set_mode_ext(pcie_ep->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_EP);
+ if (ret)
+ goto err_phy_exit;
+
+ ret = phy_power_on(pcie_ep->phy);
+ if (ret)
+ goto err_phy_exit;
+
+ /*
+ * Some Qualcomm platforms require interconnect bandwidth constraints
+ * to be set before enabling interconnect clocks.
+ *
+ * Set an initial peak bandwidth corresponding to single-lane Gen 1
+ * for the pcie-mem path.
+ */
+ ret = icc_set_bw(pcie_ep->icc_mem, 0, MBps_to_icc(PCIE_GEN1_BW_MBPS));
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ goto err_phy_off;
+ }
+
+ return 0;
+
+err_phy_off:
+ phy_power_off(pcie_ep->phy);
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
+
+ return ret;
+}
+
+static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ icc_set_bw(pcie_ep->icc_mem, 0, 0);
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
+}
+
+static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+ u32 val, offset;
+ int ret;
+
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ return ret;
+ }
+
+ /* Assert WAKE# to RC to indicate device is ready */
+ gpiod_set_value_cansleep(pcie_ep->wake, 1);
+ usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
+ gpiod_set_value_cansleep(pcie_ep->wake, 0);
+
+ qcom_pcie_ep_configure_tcsr(pcie_ep);
+
+ /* Disable BDF to SID mapping */
+ val = readl_relaxed(pcie_ep->parf + PARF_BDF_TO_SID_CFG);
+ val |= PARF_BDF_TO_SID_BYPASS;
+ writel_relaxed(val, pcie_ep->parf + PARF_BDF_TO_SID_CFG);
+
+ /* Enable debug IRQ */
+ val = readl_relaxed(pcie_ep->parf + PARF_DEBUG_INT_EN);
+ val |= PARF_DEBUG_INT_RADM_PM_TURNOFF |
+ PARF_DEBUG_INT_CFG_BUS_MASTER_EN |
+ PARF_DEBUG_INT_PM_DSTATE_CHANGE;
+ writel_relaxed(val, pcie_ep->parf + PARF_DEBUG_INT_EN);
+
+ /* Configure PCIe to endpoint mode */
+ writel_relaxed(PARF_DEVICE_TYPE_EP, pcie_ep->parf + PARF_DEVICE_TYPE);
+
+ /* Allow entering L1 state */
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val &= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+
+ /* Read halts write */
+ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+ val &= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+
+ /* Write after write halt */
+ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+ val |= PARF_AXI_MSTR_WR_ADDR_HALT_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ /* Q2A flush disable */
+ val = readl_relaxed(pcie_ep->parf + PARF_Q2A_FLUSH);
+ val &= ~PARF_Q2A_FLUSH_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);
+
+ /*
+ * Disable Master AXI clock during idle. Do not allow DBI access
+ * to take the core out of L1. Disable core clock gating that
+ * gates PIPE clock from propagating to core clock. Report to the
+ * host that Vaux is present.
+ */
+ val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
+ val &= ~PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS;
+ val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE |
+ PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
+ PARF_SYS_CTRL_AUX_PWR_DET;
+ writel_relaxed(val, pcie_ep->parf + PARF_SYS_CTRL);
+
+ /* Disable the debouncers */
+ val = readl_relaxed(pcie_ep->parf + PARF_DB_CTRL);
+ val |= PARF_DB_CTRL_INSR_DBNCR_BLOCK | PARF_DB_CTRL_RMVL_DBNCR_BLOCK |
+ PARF_DB_CTRL_DBI_WKP_BLOCK | PARF_DB_CTRL_SLV_WKP_BLOCK |
+ PARF_DB_CTRL_MST_WKP_BLOCK;
+ writel_relaxed(val, pcie_ep->parf + PARF_DB_CTRL);
+
+ /* Request to exit from L1SS for MSI and LTR MSG */
+ val = readl_relaxed(pcie_ep->parf + PARF_CFG_BITS);
+ val |= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_CFG_BITS);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /* Set the L0s Exit Latency to 2us-4us = 0x6 */
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_L0SEL;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL, 0x6);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);
+
+ /* Set the L1 Exit Latency to 32us-64us = 0x6 */
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_L1EL;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_L1EL, 0x6);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
+ val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
+ PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
+ PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
+
+ ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto err_disable_resources;
+ }
+
+ /*
+ * The physical address of the MMIO region, which is exposed as the
+ * BAR, must be written to the MHI BASE registers.
+ */
+ writel_relaxed(pcie_ep->mmio_res->start,
+ pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
+ writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);
+
+ /* Gate Master AXI clock to MHI bus during L1SS */
+ val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val &= ~PARF_MSTR_AXI_CLK_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+
+ dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+
+ /* Enable LTSSM */
+ val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
+ val |= BIT(8);
+ writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
+
+ return 0;
+
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
+
+ return ret;
+}
+
+static void qcom_pcie_perst_assert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+
+ if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
+ dev_dbg(dev, "Link is already disabled\n");
+ return;
+ }
+
+ qcom_pcie_disable_resources(pcie_ep);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
+}
+
+/* Common DWC controller ops */
+static const struct dw_pcie_ops pci_ops = {
+ .link_up = qcom_pcie_dw_link_up,
+ .start_link = qcom_pcie_dw_start_link,
+ .stop_link = qcom_pcie_dw_stop_link,
+ .write_dbi2 = qcom_pcie_dw_write_dbi2,
+};
+
+static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device_node *syscon;
+ struct resource *res;
+ int ret;
+
+ pcie_ep->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
+ if (IS_ERR(pcie_ep->parf))
+ return PTR_ERR(pcie_ep->parf);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ pci->dbi_base2 = pci->dbi_base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie_ep->elbi))
+ return PTR_ERR(pcie_ep->elbi);
+
+ pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mmio");
+ if (!pcie_ep->mmio_res) {
+ dev_err(dev, "Failed to get mmio resource\n");
+ return -EINVAL;
+ }
+
+ pcie_ep->mmio = devm_pci_remap_cfg_resource(dev, pcie_ep->mmio_res);
+ if (IS_ERR(pcie_ep->mmio))
+ return PTR_ERR(pcie_ep->mmio);
+
+ syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
+ if (!syscon) {
+ dev_dbg(dev, "PERST separation not available\n");
+ return 0;
+ }
+
+ pcie_ep->perst_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(pcie_ep->perst_map))
+ return PTR_ERR(pcie_ep->perst_map);
+
+ ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
+ 1, &pcie_ep->perst_en);
+ if (ret < 0) {
+ dev_err(dev, "No Perst Enable offset in syscon\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
+ 2, &pcie_ep->perst_sep_en);
+ if (ret < 0) {
+ dev_err(dev, "No Perst Separation Enable offset in syscon\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to get io resources %d\n", ret);
+ return ret;
+ }
+
+ pcie_ep->num_clks = devm_clk_bulk_get_all(dev, &pcie_ep->clks);
+ if (pcie_ep->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return pcie_ep->num_clks;
+ }
+
+ pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
+ if (IS_ERR(pcie_ep->core_reset))
+ return PTR_ERR(pcie_ep->core_reset);
+
+ pcie_ep->reset = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie_ep->reset))
+ return PTR_ERR(pcie_ep->reset);
+
+ pcie_ep->wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie_ep->wake))
+ return PTR_ERR(pcie_ep->wake);
+
+ pcie_ep->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie_ep->phy))
+ ret = PTR_ERR(pcie_ep->phy);
+
+ pcie_ep->icc_mem = devm_of_icc_get(dev, "pcie-mem");
+ if (IS_ERR(pcie_ep->icc_mem))
+ ret = PTR_ERR(pcie_ep->icc_mem);
+
+ return ret;
+}
+
+/* TODO: Notify clients about PCIe state change */
+static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
+ u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
+ u32 dstate, val;
+
+ writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
+ status &= mask;
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
+ dev_dbg(dev, "Received Linkdown event\n");
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
+ pci_epc_linkdown(pci->ep.epc);
+ } else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
+ dev_dbg(dev, "Received BME event. Link is enabled!\n");
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
+ qcom_pcie_ep_icc_update(pcie_ep);
+ pci_epc_bme_notify(pci->ep.epc);
+ } else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
+ dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val |= PARF_PM_CTRL_READY_ENTR_L23;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+ } else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE, status)) {
+ dstate = dw_pcie_readl_dbi(pci, DBI_CON_STATUS) &
+ DBI_CON_STATUS_POWER_STATE_MASK;
+ dev_dbg(dev, "Received D%d state event\n", dstate);
+ if (dstate == 3) {
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val |= PARF_PM_CTRL_REQ_EXIT_L1;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+ }
+ } else if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ dev_dbg(dev, "Received Linkup event. Enumeration complete!\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
+ } else {
+ dev_err(dev, "Received unknown event: %d\n", status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ u32 perst;
+
+ perst = gpiod_get_value(pcie_ep->reset);
+ if (perst) {
+ dev_dbg(dev, "PERST asserted by host. Shutting down the PCIe link!\n");
+ qcom_pcie_perst_assert(pci);
+ } else {
+ dev_dbg(dev, "PERST de-asserted by host. Starting link training!\n");
+ qcom_pcie_perst_deassert(pci);
+ }
+
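+ /* Re-arm the level trigger to catch the next PERST# transition */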
+ irq_set_irq_type(gpiod_to_irq(pcie_ep->reset),
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ int ret;
+
+ pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
+ if (pcie_ep->global_irq < 0)
+ return pcie_ep->global_irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
+ qcom_pcie_ep_global_irq_thread,
+ IRQF_ONESHOT,
+ "global_irq", pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request Global IRQ\n");
+ return ret;
+ }
+
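+ /*
+  * IRQ_NOAUTOEN keeps the PERST IRQ masked after request_irq(); it is
+  * expected to be enabled elsewhere, once the endpoint is ready to
+  * react to PERST transitions.
+  */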
+ pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
+ irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
+ qcom_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "perst_irq", pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
+ disable_irq(pcie_ep->global_irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static int qcom_pcie_ep_link_transition_count(struct seq_file *s, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = (struct qcom_pcie_ep *)
+ dev_get_drvdata(s->private);
+
+ seq_printf(s, "L0s transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
+
+ seq_printf(s, "L1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
+
+ seq_printf(s, "L1.1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
+
+ seq_printf(s, "L1.2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
+
+ seq_printf(s, "L2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
+
+ return 0;
+}
+
+static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+
+ debugfs_create_devm_seqfile(pci->dev, "link_transition_count", pcie_ep->debugfs,
+ qcom_pcie_ep_link_transition_count);
+}
+
+static const struct pci_epc_features qcom_pcie_epc_features = {
+ .linkup_notifier = true,
+ .core_init_notifier = true,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = SZ_4K,
+};
+
+static const struct pci_epc_features *
+qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep)
+{
+ return &qcom_pcie_epc_features;
+}
+
+static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static const struct dw_pcie_ep_ops pci_ep_ops = {
+ .ep_init = qcom_pcie_ep_init,
+ .raise_irq = qcom_pcie_ep_raise_irq,
+ .get_features = qcom_pcie_epc_get_features,
+};
+
+static int qcom_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_pcie_ep *pcie_ep;
+ char *name;
+ int ret;
+
+ pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
+ if (!pcie_ep)
+ return -ENOMEM;
+
+ pcie_ep->pci.dev = dev;
+ pcie_ep->pci.ops = &pci_ops;
+ pcie_ep->pci.ep.ops = &pci_ep_ops;
+ pcie_ep->pci.edma.nr_irqs = 1;
+ platform_set_drvdata(pdev, pcie_ep);
+
+ ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
+ if (ret)
+ return ret;
+
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
+ goto err_disable_resources;
+ }
+
+ ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
+ if (ret)
+ goto err_disable_resources;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_disable_irqs;
+ }
+
+ pcie_ep->debugfs = debugfs_create_dir(name, NULL);
+ qcom_pcie_ep_init_debugfs(pcie_ep);
+
+ return 0;
+
+err_disable_irqs:
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
+
+ return ret;
+}
+
+static void qcom_pcie_ep_remove(struct platform_device *pdev)
+{
+ struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);
+
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
+ debugfs_remove_recursive(pcie_ep->debugfs);
+
+ if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
+ return;
+
+ qcom_pcie_disable_resources(pcie_ep);
+}
+
+static const struct of_device_id qcom_pcie_ep_match[] = {
+ { .compatible = "qcom,sdx55-pcie-ep", },
+ { .compatible = "qcom,sm8450-pcie-ep", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
+
+static struct platform_driver qcom_pcie_ep_driver = {
+ .probe = qcom_pcie_ep_probe,
+ .remove_new = qcom_pcie_ep_remove,
+ .driver = {
+ .name = "qcom-pcie-ep",
+ .of_match_table = qcom_pcie_ep_match,
+ },
+};
+builtin_platform_driver(qcom_pcie_ep_driver);
+
+MODULE_AUTHOR("Siddartha Mohanadoss <smohanad@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
new file mode 100644
index 000000000..64420ecc2
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -0,0 +1,1655 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe root complex driver
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright 2015 Linaro Limited.
+ *
+ * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/crc8.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/phy/pcie.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* PARF registers */
+#define PARF_SYS_CTRL 0x00
+#define PARF_PM_CTRL 0x20
+#define PARF_PCS_DEEMPH 0x34
+#define PARF_PCS_SWING 0x38
+#define PARF_PHY_CTRL 0x40
+#define PARF_PHY_REFCLK 0x4c
+#define PARF_CONFIG_BITS 0x50
+#define PARF_DBI_BASE_ADDR 0x168
+#define PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
+#define PARF_Q2A_FLUSH 0x1ac
+#define PARF_LTSSM 0x1b0
+#define PARF_SID_OFFSET 0x234
+#define PARF_BDF_TRANSLATE_CFG 0x24c
+#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_DEVICE_TYPE 0x1000
+#define PARF_BDF_TO_SID_TABLE_N 0x2000
+
+/* ELBI registers */
+#define ELBI_SYS_CTRL 0x04
+
+/* DBI registers */
+#define AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define AXI_MSTR_RESP_COMP_CTRL1 0x81c
+
+/* MHI registers */
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
+
+/* PARF_SYS_CTRL register fields */
+#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29)
+#define MST_WAKEUP_EN BIT(13)
+#define SLV_WAKEUP_EN BIT(12)
+#define MSTR_ACLK_CGC_DIS BIT(10)
+#define SLV_ACLK_CGC_DIS BIT(9)
+#define CORE_CLK_CGC_DIS BIT(6)
+#define AUX_PWR_DET BIT(4)
+#define L23_CLK_RMV_DIS BIT(2)
+#define L1_CLK_RMV_DIS BIT(1)
+
+/* PARF_PM_CTRL register fields */
+#define REQ_NOT_ENTR_L1 BIT(5)
+
+/* PARF_PCS_DEEMPH register fields */
+#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) FIELD_PREP(GENMASK(21, 16), x)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) FIELD_PREP(GENMASK(13, 8), x)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) FIELD_PREP(GENMASK(5, 0), x)
+
+/* PARF_PCS_SWING register fields */
+#define PCS_SWING_TX_SWING_FULL(x) FIELD_PREP(GENMASK(14, 8), x)
+#define PCS_SWING_TX_SWING_LOW(x) FIELD_PREP(GENMASK(6, 0), x)
+
+/* PARF_PHY_CTRL register fields */
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
+#define PHY_TEST_PWR_DOWN BIT(0)
+
+/* PARF_PHY_REFCLK register fields */
+#define PHY_REFCLK_SSP_EN BIT(16)
+#define PHY_REFCLK_USE_PAD BIT(12)
+
+/* PARF_CONFIG_BITS register fields */
+#define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
+
+/* PARF_SLV_ADDR_SPACE_SIZE register value */
+#define SLV_ADDR_SPACE_SZ 0x10000000
+
+/* PARF_MHI_CLOCK_RESET_CTRL register fields */
+#define AHB_CLK_EN BIT(0)
+#define MSTR_AXI_CLK_EN BIT(1)
+#define BYPASS BIT(4)
+
+/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
+#define EN BIT(31)
+
+/* PARF_LTSSM register fields */
+#define LTSSM_EN BIT(8)
+
+/* PARF_DEVICE_TYPE register fields */
+#define DEVICE_TYPE_RC 0x4
+
+/* ELBI_SYS_CTRL register fields */
+#define ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
+
+/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
+#define CFG_BRIDGE_SB_INIT BIT(0)
+
+/* PCI_EXP_SLTCAP register fields */
+#define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
+#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
+#define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \
+ PCI_EXP_SLTCAP_PCP | \
+ PCI_EXP_SLTCAP_MRLSP | \
+ PCI_EXP_SLTCAP_AIP | \
+ PCI_EXP_SLTCAP_PIP | \
+ PCI_EXP_SLTCAP_HPS | \
+ PCI_EXP_SLTCAP_EIP | \
+ PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
+ PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
+
+#define PERST_DELAY_US 1000
+
+#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
+
+#define QCOM_PCIE_1_0_0_MAX_CLOCKS 4
+struct qcom_pcie_resources_1_0_0 {
+ struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
+ struct reset_control *core;
+ struct regulator *vdda;
+};
+
+#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
+#define QCOM_PCIE_2_1_0_MAX_RESETS 6
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
+struct qcom_pcie_resources_2_1_0 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
+ struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
+ int num_resets;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+};
+
+#define QCOM_PCIE_2_3_2_MAX_CLOCKS 4
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
+struct qcom_pcie_resources_2_3_2 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
+};
+
+#define QCOM_PCIE_2_3_3_MAX_CLOCKS 5
+#define QCOM_PCIE_2_3_3_MAX_RESETS 7
+struct qcom_pcie_resources_2_3_3 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
+ struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
+};
+
+#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
+#define QCOM_PCIE_2_4_0_MAX_RESETS 12
+struct qcom_pcie_resources_2_4_0 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+ int num_clks;
+ struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
+ int num_resets;
+};
+
+#define QCOM_PCIE_2_7_0_MAX_CLOCKS 15
+#define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
+struct qcom_pcie_resources_2_7_0 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
+ int num_clks;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
+ struct reset_control *rst;
+};
+
+#define QCOM_PCIE_2_9_0_MAX_CLOCKS 5
+struct qcom_pcie_resources_2_9_0 {
+ struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
+ struct reset_control *rst;
+};
+
+union qcom_pcie_resources {
+ struct qcom_pcie_resources_1_0_0 v1_0_0;
+ struct qcom_pcie_resources_2_1_0 v2_1_0;
+ struct qcom_pcie_resources_2_3_2 v2_3_2;
+ struct qcom_pcie_resources_2_3_3 v2_3_3;
+ struct qcom_pcie_resources_2_4_0 v2_4_0;
+ struct qcom_pcie_resources_2_7_0 v2_7_0;
+ struct qcom_pcie_resources_2_9_0 v2_9_0;
+};
+
+struct qcom_pcie;
+
+struct qcom_pcie_ops {
+ int (*get_resources)(struct qcom_pcie *pcie);
+ int (*init)(struct qcom_pcie *pcie);
+ int (*post_init)(struct qcom_pcie *pcie);
+ void (*deinit)(struct qcom_pcie *pcie);
+ void (*ltssm_enable)(struct qcom_pcie *pcie);
+ int (*config_sid)(struct qcom_pcie *pcie);
+};
+
+struct qcom_pcie_cfg {
+ const struct qcom_pcie_ops *ops;
+};
+
+struct qcom_pcie {
+ struct dw_pcie *pci;
+ void __iomem *parf; /* DT parf */
+ void __iomem *elbi; /* DT elbi */
+ void __iomem *mhi;
+ union qcom_pcie_resources res;
+ struct phy *phy;
+ struct gpio_desc *reset;
+ struct icc_path *icc_mem;
+ const struct qcom_pcie_cfg *cfg;
+ struct dentry *debugfs;
+ bool suspended;
+};
+
+#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
+
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+{
+ /* Ensure that PERST has been asserted for at least 100 ms */
+ msleep(100);
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static int qcom_pcie_start_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ /* Enable Link Training state machine */
+ if (pcie->cfg->ops->ltssm_enable)
+ pcie->cfg->ops->ltssm_enable(pcie);
+
+ return 0;
+}
+
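+/*
+ * Clear the Hot-Plug Capable bit in the Slot Capabilities register so
+ * hotplug support is not advertised to the OS.
+ */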
+static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
+{
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
+ val &= ~PCI_EXP_SLTCAP_HPC;
+ writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->elbi + ELBI_SYS_CTRL);
+ val |= ELBI_SYS_CTRL_LT_ENABLE;
+ writel(val, pcie->elbi + ELBI_SYS_CTRL);
+}
+
+static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vdda_phy";
+ res->supplies[2].supply = "vdda_refclk";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->clks[0].id = "iface";
+ res->clks[1].id = "core";
+ res->clks[2].id = "phy";
+ res->clks[3].id = "aux";
+ res->clks[4].id = "ref";
+
+ /* iface, core, phy are required */
+ ret = devm_clk_bulk_get(dev, 3, res->clks);
+ if (ret < 0)
+ return ret;
+
+ /* aux, ref are optional */
+ ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
+ if (ret < 0)
+ return ret;
+
+ res->resets[0].id = "pci";
+ res->resets[1].id = "axi";
+ res->resets[2].id = "ahb";
+ res->resets[3].id = "por";
+ res->resets[4].id = "phy";
+ res->resets[5].id = "ext";
+
+ /* the "ext" reset is not used on APQ8064 */
+ res->num_resets = is_apq ? 5 : 6;
+ ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ reset_control_bulk_assert(res->num_resets, res->resets);
+
+ writel(1, pcie->parf + PARF_PHY_CTRL);
+
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* reset the PCIe interface, as U-Boot can leave it in an undefined state */
+ ret = reset_control_bulk_assert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = reset_control_bulk_deassert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ u32 val;
+ int ret;
+
+ /* power up the PHY, then enable the PCIe clocks */
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
+ of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
+ writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+ pcie->parf + PARF_PCS_DEEMPH);
+ writel(PCS_SWING_TX_SWING_FULL(120) |
+ PCS_SWING_TX_SWING_LOW(120),
+ pcie->parf + PARF_PCS_SWING);
+ writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
+ }
+
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+ /* set TX termination offset */
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+ val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+ }
+
+ /* enable external reference clock */
+ val = readl(pcie->parf + PARF_PHY_REFCLK);
+ /* USE_PAD is required only for ipq806x */
+ if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
+ val &= ~PHY_REFCLK_USE_PAD;
+ val |= PHY_REFCLK_SSP_EN;
+ writel(val, pcie->parf + PARF_PHY_REFCLK);
+
+ /* wait for clock acquisition */
+ usleep_range(1000, 1500);
+
+ /* Set the max TLP size to 2K, instead of the default of 4K */
+ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+ pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
+ writel(CFG_BRIDGE_SB_INIT,
+ pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);
+
+ qcom_pcie_clear_hpc(pcie->pci);
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->vdda = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(res->vdda))
+ return PTR_ERR(res->vdda);
+
+ res->clks[0].id = "iface";
+ res->clks[1].id = "aux";
+ res->clks[2].id = "master_bus";
+ res->clks[3].id = "slave_bus";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->core = devm_reset_control_get_exclusive(dev, "core");
+ return PTR_ERR_OR_ZERO(res->core);
+}
+
+static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+
+ reset_control_assert(res->core);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ regulator_disable(res->vdda);
+}
+
+static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_deassert(res->core);
+ if (ret) {
+ dev_err(dev, "cannot deassert core reset\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ goto err_assert_reset;
+ }
+
+ ret = regulator_enable(res->vdda);
+ if (ret) {
+ dev_err(dev, "cannot enable vdda regulator\n");
+ goto err_disable_clks;
+ }
+
+ return 0;
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+err_assert_reset:
+ reset_control_assert(res->core);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
+{
+ /* change DBI base address */
+ writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ qcom_pcie_clear_hpc(pcie->pci);
+
+ return 0;
+}
+
+static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->parf + PARF_LTSSM);
+ val |= LTSSM_EN;
+ writel(val, pcie->parf + PARF_LTSSM);
+}
+
+static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->clks[0].id = "aux";
+ res->clks[1].id = "cfg";
+ res->clks[2].id = "bus_master";
+ res->clks[3].id = "bus_slave";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* take the PHY out of the test power-down state */
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PARF_SYS_CTRL);
+ val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
+ writel(val, pcie->parf + PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BYPASS;
+ writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ qcom_pcie_clear_hpc(pcie->pci);
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
+ int ret;
+
+ res->clks[0].id = "aux";
+ res->clks[1].id = "master_bus";
+ res->clks[2].id = "slave_bus";
+ res->clks[3].id = "iface";
+
+ /* qcom,pcie-ipq4019 is defined without "iface" */
+ res->num_clks = is_ipq ? 3 : 4;
+
+ ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->resets[0].id = "axi_m";
+ res->resets[1].id = "axi_s";
+ res->resets[2].id = "axi_m_sticky";
+ res->resets[3].id = "pipe_sticky";
+ res->resets[4].id = "pwr";
+ res->resets[5].id = "ahb";
+ res->resets[6].id = "pipe";
+ res->resets[7].id = "axi_m_vmid";
+ res->resets[8].id = "axi_s_xpu";
+ res->resets[9].id = "parf";
+ res->resets[10].id = "phy";
+ res->resets[11].id = "phy_ahb";
+
+ res->num_resets = is_ipq ? 12 : 6;
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+
+ reset_control_bulk_assert(res->num_resets, res->resets);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+}
+
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_bulk_assert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_bulk_deassert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+ if (ret) {
+ reset_control_bulk_assert(res->num_resets, res->resets);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->clks[0].id = "iface";
+ res->clks[1].id = "axi_m";
+ res->clks[2].id = "axi_s";
+ res->clks[3].id = "ahb";
+ res->clks[4].id = "aux";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->rst[0].id = "axi_m";
+ res->rst[1].id = "axi_s";
+ res->rst[2].id = "pipe";
+ res->rst[3].id = "axi_m_sticky";
+ res->rst[4].id = "sticky";
+ res->rst[5].id = "ahb";
+ res->rst[6].id = "sleep";
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
+ return ret;
+ }
+
+ usleep_range(2000, 2500);
+
+ ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ return ret;
+ }
+
+ /*
+ * There is no way to check whether the reset has completed, so wait
+ * for a fixed period instead.
+ */
+ usleep_range(2000, 2500);
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ goto err_assert_resets;
+ }
+
+ return 0;
+
+err_assert_resets:
+ /*
+ * Failure is not checked here; the original error in 'ret' is
+ * returned regardless.
+ */
+ reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
+ SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PARF_SYS_CTRL);
+ writel(0, pcie->parf + PARF_Q2A_FLUSH);
+
+ writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPMS;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ unsigned int num_clks, num_opt_clks;
+ unsigned int idx;
+ int ret;
+
+ res->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(res->rst))
+ return PTR_ERR(res->rst);
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ idx = 0;
+ res->clks[idx++].id = "aux";
+ res->clks[idx++].id = "cfg";
+ res->clks[idx++].id = "bus_master";
+ res->clks[idx++].id = "bus_slave";
+ res->clks[idx++].id = "slave_q2a";
+
+ num_clks = idx;
+
+ ret = devm_clk_bulk_get(dev, num_clks, res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->clks[idx++].id = "tbu";
+ res->clks[idx++].id = "ddrss_sf_tbu";
+ res->clks[idx++].id = "aggre0";
+ res->clks[idx++].id = "aggre1";
+ res->clks[idx++].id = "noc_aggr";
+ res->clks[idx++].id = "noc_aggr_4";
+ res->clks[idx++].id = "noc_aggr_south_sf";
+ res->clks[idx++].id = "cnoc_qx";
+ res->clks[idx++].id = "sleep";
+ res->clks[idx++].id = "cnoc_sf_axi";
+
+ num_opt_clks = idx - num_clks;
+ res->num_clks = idx;
+
+ ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+ if (ret < 0)
+ goto err_disable_regulators;
+
+ ret = reset_control_assert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset assert failed (%d)\n", ret);
+ goto err_disable_clocks;
+ }
+
+ usleep_range(1000, 1500);
+
+ ret = reset_control_deassert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset deassert failed (%d)\n", ret);
+ goto err_disable_clocks;
+ }
+
+ /* Wait for reset to complete, required on SM8450 */
+ usleep_range(1000, 1500);
+
+ /* configure PCIe to RC mode */
+ writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
+
+ /* take the PHY out of the test power-down state */
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PARF_SYS_CTRL);
+ val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
+ writel(val, pcie->parf + PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BYPASS;
+ writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+
+ /* Enable L1 and L1SS */
+ val = readl(pcie->parf + PARF_PM_CTRL);
+ val &= ~REQ_NOT_ENTR_L1;
+ writel(val, pcie->parf + PARF_PM_CTRL);
+
+ val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+err_disable_clocks:
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+err_disable_regulators:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+ qcom_pcie_clear_hpc(pcie->pci);
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
+{
+ /* iommu map structure */
+ struct {
+ u32 bdf;
+ u32 phandle;
+ u32 smmu_sid;
+ u32 smmu_sid_len;
+ } *map;
+ void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
+ struct device *dev = pcie->pci->dev;
+ u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+ int i, nr_map, size = 0;
+ u32 smmu_sid_base;
+
+ of_get_property(dev->of_node, "iommu-map", &size);
+ if (!size)
+ return 0;
+
+ map = kzalloc(size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
+ size / sizeof(u32));
+
+ nr_map = size / (sizeof(*map));
+
+ crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
+
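+ /*
+  * Each table slot is selected by a CRC8 hash of the big-endian BDF;
+  * collisions are resolved by probing for the next free slot and
+  * linking it through the NEXT field.
+  */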
+ /* The registers need to be zeroed out first */
+ memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
+
+ /* Extract the SMMU SID base from the first entry of iommu-map */
+ smmu_sid_base = map[0].smmu_sid;
+
+ /* Look for an available entry to hold the mapping */
+ for (i = 0; i < nr_map; i++) {
+ __be16 bdf_be = cpu_to_be16(map[i].bdf);
+ u32 val;
+ u8 hash;
+
+ hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+
+ /* If the register is already populated, look for the next available entry */
+ while (val) {
+ u8 current_hash = hash++;
+ u8 next_mask = 0xff;
+
+ /* If the NEXT field is NULL, update it with the next hash */
+ if (!(val & next_mask)) {
+ val |= (u32)hash;
+ writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
+ }
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
+ val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
+ writel(val, bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ kfree(map);
+
+ return 0;
+}
+
+static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->clks[0].id = "iface";
+ res->clks[1].id = "axi_m";
+ res->clks[2].id = "axi_s";
+ res->clks[3].id = "axi_bridge";
+ res->clks[4].id = "rchng";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(res->rst))
+ return PTR_ERR(res->rst);
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct device *dev = pcie->pci->dev;
+ int ret;
+
+ ret = reset_control_assert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset assert failed (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * The delay periods before and after reset deassert are working values
+ * taken from the downstream CodeAurora kernel.
+ */
+ usleep_range(2000, 2500);
+
+ ret = reset_control_deassert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset deassert failed (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(2000, 2500);
+
+ return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int i;
+
+ writel(SLV_ADDR_SPACE_SZ,
+ pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+
+ writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
+ writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
+ pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
+ GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
+ pci->dbi_base + GEN3_RELATED_OFF);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
+ SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PARF_SYS_CTRL);
+
+ writel(0, pcie->parf + PARF_Q2A_FLUSH);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPMS;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
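+ /* Clear all 256 entries of the BDF-to-SID translation table */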
+ for (i = 0; i < 256; i++)
+ writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));
+
+ return 0;
+}
+
+static int qcom_pcie_link_up(struct dw_pcie *pci)
+{
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
+ return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ int ret;
+
+ qcom_ep_reset_assert(pcie);
+
+ ret = pcie->cfg->ops->init(pcie);
+ if (ret)
+ return ret;
+
+ ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret)
+ goto err_deinit;
+
+ ret = phy_power_on(pcie->phy);
+ if (ret)
+ goto err_deinit;
+
+ if (pcie->cfg->ops->post_init) {
+ ret = pcie->cfg->ops->post_init(pcie);
+ if (ret)
+ goto err_disable_phy;
+ }
+
+ qcom_ep_reset_deassert(pcie);
+
+ if (pcie->cfg->ops->config_sid) {
+ ret = pcie->cfg->ops->config_sid(pcie);
+ if (ret)
+ goto err_assert_reset;
+ }
+
+ return 0;
+
+err_assert_reset:
+ qcom_ep_reset_assert(pcie);
+err_disable_phy:
+ phy_power_off(pcie->phy);
+err_deinit:
+ pcie->cfg->ops->deinit(pcie);
+
+ return ret;
+}
+
+static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ qcom_ep_reset_assert(pcie);
+ phy_power_off(pcie->phy);
+ pcie->cfg->ops->deinit(pcie);
+}
+
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+ .host_init = qcom_pcie_host_init,
+ .host_deinit = qcom_pcie_host_deinit,
+};
+
+/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
+static const struct qcom_pcie_ops ops_2_1_0 = {
+ .get_resources = qcom_pcie_get_resources_2_1_0,
+ .init = qcom_pcie_init_2_1_0,
+ .post_init = qcom_pcie_post_init_2_1_0,
+ .deinit = qcom_pcie_deinit_2_1_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */
+static const struct qcom_pcie_ops ops_1_0_0 = {
+ .get_resources = qcom_pcie_get_resources_1_0_0,
+ .init = qcom_pcie_init_1_0_0,
+ .post_init = qcom_pcie_post_init_1_0_0,
+ .deinit = qcom_pcie_deinit_1_0_0,
+ .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */
+static const struct qcom_pcie_ops ops_2_3_2 = {
+ .get_resources = qcom_pcie_get_resources_2_3_2,
+ .init = qcom_pcie_init_2_3_2,
+ .post_init = qcom_pcie_post_init_2_3_2,
+ .deinit = qcom_pcie_deinit_2_3_2,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */
+static const struct qcom_pcie_ops ops_2_4_0 = {
+ .get_resources = qcom_pcie_get_resources_2_4_0,
+ .init = qcom_pcie_init_2_4_0,
+ .post_init = qcom_pcie_post_init_2_3_2,
+ .deinit = qcom_pcie_deinit_2_4_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_3_3 = {
+ .get_resources = qcom_pcie_get_resources_2_3_3,
+ .init = qcom_pcie_init_2_3_3,
+ .post_init = qcom_pcie_post_init_2_3_3,
+ .deinit = qcom_pcie_deinit_2_3_3,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_7_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.9.0 */
+static const struct qcom_pcie_ops ops_1_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+ .config_sid = qcom_pcie_config_sid_1_9_0,
+};
+
+/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
+static const struct qcom_pcie_ops ops_2_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_9_0,
+ .init = qcom_pcie_init_2_9_0,
+ .post_init = qcom_pcie_post_init_2_9_0,
+ .deinit = qcom_pcie_deinit_2_9_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+static const struct qcom_pcie_cfg cfg_1_0_0 = {
+ .ops = &ops_1_0_0,
+};
+
+static const struct qcom_pcie_cfg cfg_1_9_0 = {
+ .ops = &ops_1_9_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_1_0 = {
+ .ops = &ops_2_1_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_3_2 = {
+ .ops = &ops_2_3_2,
+};
+
+static const struct qcom_pcie_cfg cfg_2_3_3 = {
+ .ops = &ops_2_3_3,
+};
+
+static const struct qcom_pcie_cfg cfg_2_4_0 = {
+ .ops = &ops_2_4_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_7_0 = {
+ .ops = &ops_2_7_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_9_0 = {
+ .ops = &ops_2_9_0,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = qcom_pcie_link_up,
+ .start_link = qcom_pcie_start_link,
+};
+
+static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ int ret;
+
+ pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
+ if (IS_ERR(pcie->icc_mem))
+ return PTR_ERR(pcie->icc_mem);
+
+ /*
+ * Some Qualcomm platforms require interconnect bandwidth constraints
+ * to be set before enabling interconnect clocks.
+ *
+ * Set an initial peak bandwidth corresponding to single-lane Gen 1
+ * for the pcie-mem path.
+ */
+ ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250));
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u32 offset, status, bw;
+ int speed, width;
+ int ret;
+
+ if (!pcie->icc_mem)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
+ /* Only update constraints if link is up. */
+ if (!(status & PCI_EXP_LNKSTA_DLLLA))
+ return;
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
+
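+ /*
+  * Map the negotiated link speed to a per-lane peak bandwidth, then
+  * scale it by the negotiated link width below.
+  */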
+ switch (speed) {
+ case 1:
+ bw = MBps_to_icc(250);
+ break;
+ case 2:
+ bw = MBps_to_icc(500);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ fallthrough;
+ case 3:
+ bw = MBps_to_icc(985);
+ break;
+ }
+
+ ret = icc_set_bw(pcie->icc_mem, 0, width * bw);
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ }
+}
+
+static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
+{
+ struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);
+
+ seq_printf(s, "L0s transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
+
+ seq_printf(s, "L1 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
+
+ seq_printf(s, "L1.1 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
+
+ seq_printf(s, "L1.2 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
+
+ seq_printf(s, "L2 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
+
+ return 0;
+}
+
+static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name)
+ return;
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+ debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
+ qcom_pcie_link_transition_count);
+}
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+ const struct qcom_pcie_cfg *pcie_cfg;
+ struct device *dev = &pdev->dev;
+ struct qcom_pcie *pcie;
+ struct dw_pcie_rp *pp;
+ struct resource *res;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie_cfg = of_device_get_match_data(dev);
+ if (!pcie_cfg || !pcie_cfg->ops) {
+ dev_err(dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm_runtime_put;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pp = &pci->pp;
+
+ pcie->pci = pci;
+
+ pcie->cfg = pcie_cfg;
+
+ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset)) {
+ ret = PTR_ERR(pcie->reset);
+ goto err_pm_runtime_put;
+ }
+
+ pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
+ if (IS_ERR(pcie->parf)) {
+ ret = PTR_ERR(pcie->parf);
+ goto err_pm_runtime_put;
+ }
+
+ pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(pcie->elbi)) {
+ ret = PTR_ERR(pcie->elbi);
+ goto err_pm_runtime_put;
+ }
+
+ /* MHI region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
+ if (res) {
+ pcie->mhi = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->mhi)) {
+ ret = PTR_ERR(pcie->mhi);
+ goto err_pm_runtime_put;
+ }
+ }
+
+ pcie->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
+ goto err_pm_runtime_put;
+ }
+
+ ret = qcom_pcie_icc_init(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ ret = pcie->cfg->ops->get_resources(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ pp->ops = &qcom_pcie_dw_ops;
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "cannot initialize host\n");
+ goto err_phy_exit;
+ }
+
+ qcom_pcie_icc_update(pcie);
+
+ if (pcie->mhi)
+ qcom_pcie_init_debugfs(pcie);
+
+ return 0;
+
+err_phy_exit:
+ phy_exit(pcie->phy);
+err_pm_runtime_put:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int qcom_pcie_suspend_noirq(struct device *dev)
+{
+ struct qcom_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * Set minimum bandwidth required to keep data path functional during
+ * suspend.
+ */
+ ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Turn OFF the resources only for controllers without active PCIe
+ * devices. For controllers with active devices, the resources are kept
+ * ON and the link is expected to be in L0/L1 (sub)states.
+ *
+ * Turning OFF the resources for controllers with active PCIe devices
+ * would trigger an access violation at the end of the suspend cycle,
+ * as the kernel tries to access the PCIe devices' config space to mask
+ * MSIs.
+ *
+ * Also, it is not desirable to put the link into the L2/L3 state, as
+ * that implies the VDD supply will be removed and the devices may go
+ * into a powerdown state. This would affect the lifetime of storage
+ * devices like NVMe.
+ */
+ if (!dw_pcie_link_up(pcie->pci)) {
+ qcom_pcie_host_deinit(&pcie->pci->pp);
+ pcie->suspended = true;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_resume_noirq(struct device *dev)
+{
+ struct qcom_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ if (pcie->suspended) {
+ ret = qcom_pcie_host_init(&pcie->pci->pp);
+ if (ret)
+ return ret;
+
+ pcie->suspended = false;
+ }
+
+ qcom_pcie_icc_update(pcie);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+ { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
+ { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
+ { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0},
+ { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
+ { .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
+ { }
+};
+
+static void qcom_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
+
+static const struct dev_pm_ops qcom_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
+};
+
+static struct platform_driver qcom_pcie_driver = {
+ .probe = qcom_pcie_probe,
+ .driver = {
+ .name = "qcom-pcie",
+ .suppress_bind_attrs = true,
+ .of_match_table = qcom_pcie_match,
+ .pm = &qcom_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+builtin_platform_driver(qcom_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
new file mode 100644
index 000000000..99d47ae80
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs
+ *
+ * SPEAr13xx PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2010-2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@gmail.com>
+ * Mohit Kumar <mohit.kumar.dhaka@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+struct spear13xx_pcie {
+ struct dw_pcie *pci;
+ void __iomem *app_base;
+ struct phy *phy;
+ struct clk *clk;
+};
+
+struct pcie_app_reg {
+ u32 app_ctrl_0; /* cr0 */
+ u32 app_ctrl_1; /* cr1 */
+ u32 app_status_0; /* cr2 */
+ u32 app_status_1; /* cr3 */
+ u32 msg_status; /* cr4 */
+ u32 msg_payload; /* cr5 */
+ u32 int_sts; /* cr6 */
+ u32 int_clr; /* cr7 */
+ u32 int_mask; /* cr8 */
+ u32 mst_bmisc; /* cr9 */
+ u32 phy_ctrl; /* cr10 */
+ u32 phy_status; /* cr11 */
+ u32 cxpl_debug_info_0; /* cr12 */
+ u32 cxpl_debug_info_1; /* cr13 */
+ u32 ven_msg_ctrl_0; /* cr14 */
+ u32 ven_msg_ctrl_1; /* cr15 */
+ u32 ven_msg_data_0; /* cr16 */
+ u32 ven_msg_data_1; /* cr17 */
+ u32 ven_msi_0; /* cr18 */
+ u32 ven_msi_1; /* cr19 */
+ u32 mst_rmisc; /* cr20 */
+};
+
+/* CR0 ID */
+#define APP_LTSSM_ENABLE_ID 3
+#define DEVICE_TYPE_RC (4 << 25)
+#define MISCTRL_EN_ID 30
+#define REG_TRANSLATION_ENABLE 31
+
+/* CR3 ID */
+#define XMLH_LINK_UP (1 << 6)
+
+/* CR6 */
+#define MSI_CTRL_INT (1 << 26)
+
+#define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev)
+
+static int spear13xx_pcie_start_link(struct dw_pcie *pci)
+{
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
+
+ /* enable ltssm */
+ writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID) |
+ (1 << APP_LTSSM_ENABLE_ID) |
+ ((u32)1 << REG_TRANSLATION_ENABLE),
+ &app_reg->app_ctrl_0);
+
+ return 0;
+}
+
+static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
+{
+ struct spear13xx_pcie *spear13xx_pcie = arg;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
+ struct dw_pcie *pci = spear13xx_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ unsigned int status;
+
+ status = readl(&app_reg->int_sts);
+
+ if (status & MSI_CTRL_INT) {
+ BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI));
+ dw_handle_msi_irq(pp);
+ }
+
+ writel(status, &app_reg->int_clr);
+
+ return IRQ_HANDLED;
+}
+
+static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
+{
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
+
+ /* Enable MSI interrupt */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ writel(readl(&app_reg->int_mask) |
+ MSI_CTRL_INT, &app_reg->int_mask);
+}
+
+static int spear13xx_pcie_link_up(struct dw_pcie *pci)
+{
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
+
+ if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
+ return 1;
+
+ return 0;
+}
+
+static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
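+ /* The application registers sit at a fixed 0x2000 offset from the DBI base */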
+ spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
+
+ /*
+ * This controller supports only a 128 byte read request size, but its
+ * default value in the capability register is 512 bytes, so force it
+ * to 128 here.
+ */
+ val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val);
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);
+
+ spear13xx_pcie_enable_interrupts(spear13xx_pcie);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
+ .host_init = spear13xx_pcie_host_init,
+};
+
+static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = spear13xx_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "spear1340-pcie", spear13xx_pcie);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", pp->irq);
+ return ret;
+ }
+
+ pp->ops = &spear13xx_pcie_host_ops;
+ pp->msi_irq[0] = -ENODEV;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = spear13xx_pcie_link_up,
+ .start_link = spear13xx_pcie_start_link,
+};
+
+static int spear13xx_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct spear13xx_pcie *spear13xx_pcie;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
+ if (!spear13xx_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ spear13xx_pcie->pci = pci;
+
+ spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(spear13xx_pcie->phy)) {
+ ret = PTR_ERR(spear13xx_pcie->phy);
+ if (ret == -EPROBE_DEFER)
+ dev_info(dev, "probe deferred\n");
+ else
+ dev_err(dev, "couldn't get pcie-phy\n");
+ return ret;
+ }
+
+ phy_init(spear13xx_pcie->phy);
+
+ spear13xx_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(spear13xx_pcie->clk)) {
+ dev_err(dev, "couldn't get clk for pcie\n");
+ return PTR_ERR(spear13xx_pcie->clk);
+ }
+ ret = clk_prepare_enable(spear13xx_pcie->clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clk for pcie\n");
+ return ret;
+ }
+
+ if (of_property_read_bool(np, "st,pcie-is-gen1"))
+ pci->link_gen = 1;
+
+ platform_set_drvdata(pdev, spear13xx_pcie);
+
+ ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev);
+ if (ret < 0)
+ goto fail_clk;
+
+ return 0;
+
+fail_clk:
+ clk_disable_unprepare(spear13xx_pcie->clk);
+
+ return ret;
+}
+
+static const struct of_device_id spear13xx_pcie_of_match[] = {
+ { .compatible = "st,spear1340-pcie", },
+ {},
+};
+
+static struct platform_driver spear13xx_pcie_driver = {
+ .probe = spear13xx_pcie_probe,
+ .driver = {
+ .name = "spear-pcie",
+ .of_match_table = spear13xx_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(spear13xx_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
new file mode 100644
index 000000000..55f61914a
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ACPI quirks for Tegra194 PCIe host controller
+ *
+ * Copyright (C) 2021 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include "pcie-designware.h"
+
+struct tegra194_pcie_ecam {
+ void __iomem *config_base;
+ void __iomem *iatu_base;
+ void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct tegra194_pcie_ecam *pcie_ecam;
+
+ pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+ if (!pcie_ecam)
+ return -ENOMEM;
+
+ pcie_ecam->config_base = cfg->win;
+ pcie_ecam->iatu_base = cfg->win + SZ_256K;
+ pcie_ecam->dbi_base = cfg->win + SZ_512K;
+ cfg->priv = pcie_ecam;
+
+ return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+ u32 val, u32 reg)
+{
+ u32 offset = PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) +
+ PCIE_ATU_VIEWPORT_BASE;
+
+ writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+ PCIE_ATU_LOWER_BASE);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+ PCIE_ATU_UPPER_BASE);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+ PCIE_ATU_LOWER_TARGET);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+ PCIE_ATU_UPPER_TARGET);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2);
+}
+
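+/*
+ * Config accesses to the root bus, slot 0, are served directly from the
+ * local DBI space. Accesses to any other bus reprogram outbound iATU
+ * region 0 for a Type 0 (immediate child bus) or Type 1 (deeper bus)
+ * config transaction targeting the B/D/F encoded in the PCIe address.
+ */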
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+ u32 busdev;
+ int type;
+
+ if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+ return NULL;
+
+ if (bus->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ return pcie_ecam->dbi_base + where;
+ else
+ return NULL;
+ }
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (bus->parent->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ return NULL;
+ } else {
+ type = PCIE_ATU_TYPE_CFG1;
+ }
+
+ program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+ SZ_256K);
+
+ return pcie_ecam->config_base + where;
+}
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+ .init = tegra194_acpi_init,
+ .pci_ops = {
+ .map_bus = tegra194_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
new file mode 100644
index 000000000..248cd9347
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -0,0 +1,2514 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for the following SoCs
+ * Tegra194
+ * Tegra234
+ *
+ * Copyright (C) 2019-2022 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include "../../pci.h"
+
+#define TEGRA194_DWC_IP_VER 0x490A
+#define TEGRA234_DWC_IP_VER 0x562A
+
+#define APPL_PINMUX 0x0
+#define APPL_PINMUX_PEX_RST BIT(0)
+#define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2)
+#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3)
+#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4)
+#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5)
+
+#define APPL_CTRL 0x4
+#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6)
+#define APPL_CTRL_LTSSM_EN BIT(7)
+#define APPL_CTRL_HW_HOT_RST_EN BIT(20)
+#define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0)
+#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN 0x2
+
+#define APPL_INTR_EN_L0_0 0x8
+#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
+#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
+#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
+#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15)
+#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
+#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
+#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
+
+#define APPL_INTR_STATUS_L0 0xC
+#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
+#define APPL_INTR_STATUS_L0_INT_INT BIT(8)
+#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15)
+#define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16)
+#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)
+
+#define APPL_INTR_EN_L1_0_0 0x1C
+#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
+#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30)
+
+#define APPL_INTR_STATUS_L1_0_0 0x20
+#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
+#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3)
+#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30)
+
+#define APPL_INTR_STATUS_L1_1 0x2C
+#define APPL_INTR_STATUS_L1_2 0x30
+#define APPL_INTR_STATUS_L1_3 0x34
+#define APPL_INTR_STATUS_L1_6 0x3C
+#define APPL_INTR_STATUS_L1_7 0x40
+#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1)
+
+#define APPL_INTR_EN_L1_8_0 0x44
+#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
+#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_8_INTX_EN BIT(11)
+#define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15)
+
+#define APPL_INTR_STATUS_L1_8_0 0x4C
+#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6)
+#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2)
+#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3)
+
+#define APPL_INTR_STATUS_L1_9 0x54
+#define APPL_INTR_STATUS_L1_10 0x58
+#define APPL_INTR_STATUS_L1_11 0x64
+#define APPL_INTR_STATUS_L1_13 0x74
+#define APPL_INTR_STATUS_L1_14 0x78
+#define APPL_INTR_STATUS_L1_15 0x7C
+#define APPL_INTR_STATUS_L1_17 0x88
+
+#define APPL_INTR_EN_L1_18 0x90
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2)
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
+
+#define APPL_INTR_STATUS_L1_18 0x94
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2)
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
+
+#define APPL_MSI_CTRL_1 0xAC
+
+#define APPL_MSI_CTRL_2 0xB0
+
+#define APPL_LEGACY_INTX 0xB8
+
+#define APPL_LTR_MSG_1 0xC4
+#define LTR_MSG_REQ BIT(15)
+#define LTR_MST_NO_SNOOP_SHIFT 16
+
+#define APPL_LTR_MSG_2 0xC8
+#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3)
+
+#define APPL_LINK_STATUS 0xCC
+#define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0)
+
+#define APPL_DEBUG 0xD0
+#define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21)
+#define APPL_DEBUG_PM_LINKST_IN_L0 0x11
+#define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3)
+#define APPL_DEBUG_LTSSM_STATE_SHIFT 3
+#define LTSSM_STATE_PRE_DETECT 5
+
+#define APPL_RADM_STATUS 0xE4
+#define APPL_PM_XMT_TURNOFF_STATE BIT(0)
+
+#define APPL_DM_TYPE 0x100
+#define APPL_DM_TYPE_MASK GENMASK(3, 0)
+#define APPL_DM_TYPE_RP 0x4
+#define APPL_DM_TYPE_EP 0x0
+
+#define APPL_CFG_BASE_ADDR 0x104
+#define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12)
+
+#define APPL_CFG_IATU_DMA_BASE_ADDR 0x108
+#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18)
+
+#define APPL_CFG_MISC 0x110
+#define APPL_CFG_MISC_SLV_EP_MODE BIT(14)
+#define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10)
+#define APPL_CFG_MISC_ARCACHE_SHIFT 10
+#define APPL_CFG_MISC_ARCACHE_VAL 3
+
+#define APPL_CFG_SLCG_OVERRIDE 0x114
+#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0)
+
+#define APPL_CAR_RESET_OVRD 0x12C
+#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0)
+
+#define IO_BASE_IO_DECODE BIT(0)
+#define IO_BASE_IO_DECODE_BIT8 BIT(8)
+
+#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0)
+#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16)
+
+#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718
+#define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19)
+
+#define N_FTS_VAL 52
+#define FTS_VAL 52
+
+#define GEN3_EQ_CONTROL_OFF 0x8a8
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
+
+#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
+#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
+#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
+#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
+#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+
+#define MSIX_ADDR_MATCH_LOW_OFF 0x940
+#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
+#define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2)
+
+#define MSIX_ADDR_MATCH_HIGH_OFF 0x944
+#define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0)
+
+#define PORT_LOGIC_MSIX_DOORBELL 0x948
+
+#define CAP_SPCIE_CAP_OFF 0x154
+#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0)
+#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8)
+#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8
+
+#define PME_ACK_TIMEOUT 10000 /* 10ms */
+
+#define LTSSM_TIMEOUT 50000 /* 50ms */
+
+#define GEN3_GEN4_EQ_PRESET_INIT 5
+
+#define GEN1_CORE_CLK_FREQ 62500000
+#define GEN2_CORE_CLK_FREQ 125000000
+#define GEN3_CORE_CLK_FREQ 250000000
+#define GEN4_CORE_CLK_FREQ 500000000
+
+#define LTR_MSG_TIMEOUT (100 * 1000) /* 100ms */
+
+#define PERST_DEBOUNCE_TIME (5 * 1000) /* 5ms */
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_ENABLED 1
+
+static const unsigned int pcie_gen_freq[] = {
+ GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */
+ GEN1_CORE_CLK_FREQ,
+ GEN2_CORE_CLK_FREQ,
+ GEN3_CORE_CLK_FREQ,
+ GEN4_CORE_CLK_FREQ
+};
+
+struct tegra_pcie_dw_of_data {
+ u32 version;
+ enum dw_pcie_device_mode mode;
+ bool has_msix_doorbell_access_fix;
+ bool has_sbr_reset_fix;
+ bool has_l1ss_exit_fix;
+ bool has_ltr_req_fix;
+ u32 cdm_chk_int_en_bit;
+ u32 gen4_preset_vec;
+ u8 n_fts[2];
+};
+
+struct tegra_pcie_dw {
+ struct device *dev;
+ struct resource *appl_res;
+ struct resource *dbi_res;
+ struct resource *atu_dma_res;
+ void __iomem *appl_base;
+ struct clk *core_clk;
+ struct reset_control *core_apb_rst;
+ struct reset_control *core_rst;
+ struct dw_pcie pci;
+ struct tegra_bpmp *bpmp;
+
+ struct tegra_pcie_dw_of_data *of_data;
+
+ bool supports_clkreq;
+ bool enable_cdm_check;
+ bool enable_srns;
+ bool link_state;
+ bool update_fc_fixup;
+ bool enable_ext_refclk;
+ u8 init_link_width;
+ u32 msi_ctrl_int;
+ u32 num_lanes;
+ u32 cid;
+ u32 cfg_link_cap_l1sub;
+ u32 ras_des_cap;
+ u32 pcie_cap_base;
+ u32 aspm_cmrt;
+ u32 aspm_pwr_on_t;
+ u32 aspm_l0s_enter_lat;
+
+ struct regulator *pex_ctl_supply;
+ struct regulator *slot_ctl_3v3;
+ struct regulator *slot_ctl_12v;
+
+ unsigned int phy_count;
+ struct phy **phys;
+
+ struct dentry *debugfs;
+
+ /* Endpoint mode specific */
+ struct gpio_desc *pex_rst_gpiod;
+ struct gpio_desc *pex_refclk_sel_gpiod;
+ unsigned int pex_rst_irq;
+ int ep_state;
+ long link_status;
+ struct icc_path *icc_path;
+};
+
+static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
+{
+ return container_of(pci, struct tegra_pcie_dw, pci);
+}
+
+static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->appl_base + reg);
+}
+
+static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->appl_base + reg);
+}
+
+struct tegra_pcie_soc {
+ enum dw_pcie_device_mode mode;
+};
+
+static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, speed, width;
+
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+
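+ /* Link bandwidth in MB/s: lane count times the per-lane data rate */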
+ val = width * (PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]) / BITS_PER_BYTE);
+
+ if (icc_set_bw(pcie->icc_path, MBps_to_icc(val), 0))
+ dev_err(pcie->dev, "can't set bw[%u]\n", val);
+
+ if (speed >= ARRAY_SIZE(pcie_gen_freq))
+ speed = 0;
+
+ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
+}
+
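+/*
+ * If Link Bandwidth Management reports that the link came up at a reduced
+ * width, work around the bad link by retraining it at Gen-1 speed.
+ */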
+static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 current_link_width;
+ u16 val;
+
+ /*
+ * NOTE: since this scenario is uncommon and the link is not stable
+ * anyway, don't wait to confirm whether the link is really
+ * transitioning to Gen-2 speed.
+ */
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+ if (val & PCI_EXP_LNKSTA_LBMS) {
+ current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+ if (pcie->init_link_width > current_link_width) {
+ dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL2);
+ val &= ~PCI_EXP_LNKCTL2_TLS;
+ val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL2, val);
+
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL);
+ val |= PCI_EXP_LNKCTL_RL;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL, val);
+ }
+ }
+}
+
+static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ u32 val, status_l0, status_l1;
+ u16 val_w;
+
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+ if (!pcie->of_data->has_sbr_reset_fix &&
+ status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
+ /* SBR & Surprise Link Down WAR */
+ val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+ val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+ appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+ udelay(1);
+ val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+ val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+ appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ }
+ }
+
+ if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
+ appl_writel(pcie,
+ APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
+ APPL_INTR_STATUS_L1_8_0);
+ apply_bad_link_workaround(pp);
+ }
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_w |= PCI_EXP_LNKSTA_LBMS;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA, val_w);
+
+ appl_writel(pcie,
+ APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
+ APPL_INTR_STATUS_L1_8_0);
+
+ val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
+ PCI_EXP_LNKSTA_CLS);
+ }
+ }
+
+ if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
+ dev_info(pci->dev, "CDM check complete\n");
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
+ }
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
+ dev_err(pci->dev, "CDM comparison mismatch\n");
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
+ }
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
+ dev_err(pci->dev, "CDM Logic error\n");
+ val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
+ }
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
+ dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
+ }
+
+ return IRQ_HANDLED;
+}
+
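+/*
+ * On hot-reset completion, clear all latched interrupt statuses and
+ * re-enable the LTSSM so that the link can train again.
+ */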
+static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+}
+
+static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+
+ if (test_and_clear_bit(0, &pcie->link_status))
+ dw_pcie_ep_linkup(ep);
+
+ tegra_pcie_icc_set(pcie);
+
+ if (pcie->of_data->has_ltr_req_fix)
+ return IRQ_HANDLED;
+
+ /* If EP doesn't advertise L1SS, just return */
+ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+ if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
+ return IRQ_HANDLED;
+
+ /* Check if BME is set to '1' */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ if (val & PCI_COMMAND_MASTER) {
+ ktime_t timeout;
+
+ /* 110us for both snoop and no-snoop */
+ val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
+ val |= (val << LTR_MST_NO_SNOOP_SHIFT);
+ appl_writel(pcie, val, APPL_LTR_MSG_1);
+
+ /* Send LTR upstream */
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+
+ timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
+ for (;;) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
+ break;
+ if (ktime_after(ktime_get(), timeout))
+ break;
+ usleep_range(1000, 1100);
+ }
+ if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
+ dev_err(pcie->dev, "Failed to send LTR message\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ int spurious = 1;
+ u32 status_l0, status_l1, link_status;
+
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ pex_ep_event_hot_rst_done(pcie);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+ link_status = appl_readl(pcie, APPL_LINK_STATUS);
+ if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ dev_dbg(pcie->dev, "Link is up with Host\n");
+ set_bit(0, &pcie->link_status);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ spurious = 0;
+ }
+
+ if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ return IRQ_WAKE_THREAD;
+
+ spurious = 0;
+ }
+
+ if (spurious) {
+ dev_warn(pcie->dev, "Spurious interrupt (STATUS = 0x%08X)\n",
+ status_l0);
+ appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ /*
+ * This endpoint-mode-specific register happens to be visible even
+ * when the controller is operating in root port mode, and the
+ * system hangs when it is accessed while the link is in the
+ * ASPM-L1 state. So skip accessing it altogether.
+ */
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
+ *val = 0x00000000;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 val)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ /*
+ * This endpoint-mode-specific register happens to be visible even
+ * when the controller is operating in root port mode, and the
+ * system hangs when it is accessed while the link is in the
+ * ASPM-L1 state. So skip accessing it altogether.
+ */
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
+ return PCIBIOS_SUCCESSFUL;
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static struct pci_ops tegra_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = tegra_pcie_dw_rd_own_conf,
+ .write = tegra_pcie_dw_wr_own_conf,
+};
+
+#if defined(CONFIG_PCIEASPM)
+static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
+ val &= ~PCI_L1SS_CAP_ASPM_L1_1;
+ dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
+}
+
+static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
+ val &= ~PCI_L1SS_CAP_ASPM_L1_2;
+ dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
+}
+
+static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
+ val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
+ val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+ val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
+ val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_DATA);
+
+ return val;
+}
+
+static int aspm_state_cnt(struct seq_file *s, void *data)
+{
+ struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
+ dev_get_drvdata(s->private);
+ u32 val;
+
+ seq_printf(s, "Tx L0s entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
+
+ seq_printf(s, "Rx L0s entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
+
+ seq_printf(s, "Link L1 entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
+
+ seq_printf(s, "Link L1.1 entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
+
+ seq_printf(s, "Link L1.2 entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
+
+ /* Clear all counters */
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
+ EVENT_COUNTER_ALL_CLEAR);
+
+ /* Re-enable counting */
+ val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+ val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+
+ return 0;
+}
+
+static void init_host_aspm(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+
+ val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
+ pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
+
+ pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
+ PCI_EXT_CAP_ID_VNDR);
+
+ /* Enable ASPM counters */
+ val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+ val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+ dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+
+ /* Program T_cmrt and T_pwr_on values */
+ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+ val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
+ val |= (pcie->aspm_cmrt << 8);
+ val |= (pcie->aspm_pwr_on_t << 19);
+ dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
+
+ /* Program L0s and L1 entrance latencies */
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
+ val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
+ val |= PORT_AFR_ENTER_ASPM;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
+}
+
+static void init_debugfs(struct tegra_pcie_dw *pcie)
+{
+ debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
+ aspm_state_cnt);
+}
+#else
+static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
+static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
+#endif
+
+static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+ u16 val_w;
+
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+ }
+
+ if (pcie->enable_cdm_check) {
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= pcie->of_data->cdm_chk_int_en_bit;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_18);
+ val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
+ val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_18);
+ }
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL);
+ val_w |= PCI_EXP_LNKCTL_LBMIE;
+ dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
+ val_w);
+}
+
+static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+
+ /* Enable legacy interrupt generation */
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_INT_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
+ val |= APPL_INTR_EN_L1_8_INTX_EN;
+ val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
+ val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
+ if (IS_ENABLED(CONFIG_PCIEAER))
+ val |= APPL_INTR_EN_L1_8_AER_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
+}
+
+static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+
+ /* Enable MSI interrupt generation */
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+}
+
+static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ /* Clear interrupt statuses before enabling interrupts */
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+ tegra_pcie_enable_system_interrupts(pp);
+ tegra_pcie_enable_legacy_interrupts(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_pcie_enable_msi_interrupts(pp);
+}
+
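+/*
+ * Program the per-lane equalization presets for 8 GT/s (Secondary PCIe
+ * capability) and 16 GT/s (Physical Layer 16.0 GT/s capability), then use
+ * the rate shadow select in GEN3_RELATED_OFF to program the preset request
+ * vector separately for the Gen3 and Gen4 data rates.
+ */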
+static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, offset, i;
+
+ /* Program init preset */
+ for (i = 0; i < pcie->num_lanes; i++) {
+ val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
+ val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
+ val |= GEN3_GEN4_EQ_PRESET_INIT;
+ val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
+ val |= (GEN3_GEN4_EQ_PRESET_INIT <<
+ CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
+ dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);
+
+ offset = dw_pcie_find_ext_capability(pci,
+ PCI_EXT_CAP_ID_PL_16GT) +
+ PCI_PL_16GT_LE_CTRL;
+ val = dw_pcie_readb_dbi(pci, offset + i);
+ val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
+ val |= GEN3_GEN4_EQ_PRESET_INIT;
+ val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
+ val |= (GEN3_GEN4_EQ_PRESET_INIT <<
+ PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
+ dw_pcie_writeb_dbi(pci, offset + i, val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
+ val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
+ val |= (pcie->of_data->gen4_preset_vec <<
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+}
+
+static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+ u16 val_16;
+
+ pp->bridge->ops = &tegra_pci_ops;
+
+ if (!pcie->pcie_cap_base)
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
+ val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
+ val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
+ dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
+
+ val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
+ val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
+ val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
+ dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);
+
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+
+ /* Enable 0xFFFF0001 response for CRS (Configuration Request Retry Status) */
+ val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
+ val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
+ val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
+ AMBA_ERROR_RESPONSE_CRS_SHIFT);
+ dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
+
+ /* Configure Max lane width from DT */
+ val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_MLW;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
+ dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
+ config_gen3_gen4_eq_presets(pcie);
+
+ init_host_aspm(pcie);
+
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
+
+ if (pcie->update_fc_fixup) {
+ val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+ val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+ dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+ }
+
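+ /*
+ * Start the core clock at the Gen4 rate; tegra_pcie_icc_set() scales
+ * it down to match the speed the link actually trains to.
+ */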
+ clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
+ return 0;
+}
+
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct dw_pcie_rp *pp = &pci->pp;
+ u32 val, offset, tmp;
+ bool retry = true;
+
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ enable_irq(pcie->pex_rst_irq);
+ return 0;
+ }
+
+retry_link:
+ /* Assert RST */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val &= ~APPL_PINMUX_PEX_RST;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ usleep_range(100, 200);
+
+ /* Enable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ /* De-assert RST */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_PEX_RST;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ msleep(100);
+
+ if (dw_pcie_wait_for_link(pci)) {
+ if (!retry)
+ return 0;
+ /*
+ * Some endpoints can't bring the link up if the root port has the
+ * Data Link Feature (DLF) enabled. Refer to PCIe spec rev 4.0
+ * ver 1.0, sec 3.4.2 & 7.7.4, for more info on Scaled Flow
+ * Control and DLF. So confirm that this is indeed the case here
+ * and attempt link-up once again with DLF disabled.
+ */
+ val = appl_readl(pcie, APPL_DEBUG);
+ val &= APPL_DEBUG_LTSSM_STATE_MASK;
+ val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
+ tmp = appl_readl(pcie, APPL_LINK_STATUS);
+ tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
+ if (!(val == APPL_DEBUG_PM_LINKST_IN_L0 && !tmp)) {
+ /* Link is down for all good reasons */
+ return 0;
+ }
+
+ dev_info(pci->dev, "Link is down in DLL\n");
+ dev_info(pci->dev, "Trying again with DLFE disabled\n");
+ /* Disable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ reset_control_assert(pcie->core_rst);
+ reset_control_deassert(pcie->core_rst);
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
+ val &= ~PCI_DLF_EXCHANGE_ENABLE;
+ dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
+
+ tegra_pcie_dw_host_init(pp);
+ dw_pcie_setup_rc(pp);
+
+ retry = false;
+ goto retry_link;
+ }
+
+ tegra_pcie_icc_set(pcie);
+
+ tegra_pcie_enable_interrupts(pp);
+
+ return 0;
+}
+
+static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+
+ return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ disable_irq(pcie->pex_rst_irq);
+}
+
+static const struct dw_pcie_ops tegra_dw_pcie_ops = {
+ .link_up = tegra_pcie_dw_link_up,
+ .start_link = tegra_pcie_dw_start_link,
+ .stop_link = tegra_pcie_dw_stop_link,
+};
+
+static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
+ .host_init = tegra_pcie_dw_host_init,
+};
+
+static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
+{
+ unsigned int phy_count = pcie->phy_count;
+
+ while (phy_count--) {
+ phy_power_off(pcie->phys[phy_count]);
+ phy_exit(pcie->phys[phy_count]);
+ }
+}
+
+static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < pcie->phy_count; i++) {
+ ret = phy_init(pcie->phys[i]);
+ if (ret < 0)
+ goto phy_power_off;
+
+ ret = phy_power_on(pcie->phys[i]);
+ if (ret < 0)
+ goto phy_exit;
+ }
+
+ return 0;
+
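+ /*
+ * Unwind in reverse order. Note that the phy_exit label sits inside
+ * the loop body: the PHY whose phy_power_on() failed is only exited,
+ * while every earlier PHY is both powered off and exited.
+ */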
+phy_power_off:
+ while (i--) {
+ phy_power_off(pcie->phys[i]);
+phy_exit:
+ phy_exit(pcie->phys[i]);
+ }
+
+ return ret;
+}
+
+static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device_node *np = pcie->dev->of_node;
+ int ret;
+
+ pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ if (!pcie->dbi_res) {
+ dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
+ if (ret < 0) {
+ dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
+ &pcie->aspm_pwr_on_t);
+ if (ret < 0)
+ dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
+ ret);
+
+ ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
+ &pcie->aspm_l0s_enter_lat);
+ if (ret < 0)
+ dev_info(pcie->dev,
+ "Failed to read ASPM L0s Entrance latency: %d\n", ret);
+
+ ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_count_strings(np, "phy-names");
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
+ ret);
+ return ret;
+ }
+ pcie->phy_count = ret;
+
+ if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
+ pcie->update_fc_fixup = true;
+
+ /* RP using an external REFCLK is supported only in Tegra234 */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
+ pcie->enable_ext_refclk = true;
+ } else {
+ pcie->enable_ext_refclk =
+ of_property_read_bool(pcie->dev->of_node,
+ "nvidia,enable-ext-refclk");
+ }
+
+ pcie->supports_clkreq =
+ of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
+
+ pcie->enable_cdm_check =
+ of_property_read_bool(np, "snps,enable-cdm-check");
+
+ if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
+ pcie->enable_srns =
+ of_property_read_bool(np, "nvidia,enable-srns");
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* Endpoint mode specific DT entries */
+ pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie->pex_rst_gpiod)) {
+ int err = PTR_ERR(pcie->pex_rst_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get PERST GPIO: %d\n"),
+ err);
+ return err;
+ }
+
+ pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
+ "nvidia,refclk-select",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
+ int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
+ err);
+ pcie->pex_refclk_sel_gpiod = NULL;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+ bool enable)
+{
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
+
+ /*
+ * On Tegra194, controller 5 doesn't need to have its state set by
+ * the BPMP firmware.
+ */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
+ return 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
+ req.controller_state.pcie_controller = pcie->cid;
+ req.controller_state.enable = enable;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_UPHY;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ return tegra_bpmp_transfer(pcie->bpmp, &msg);
+}
+
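+/*
+ * Ask the BPMP firmware, via an MRQ_UPHY request, to initialize or turn
+ * off the UPHY PLL for this controller; used only when the controller
+ * runs from an external REFCLK.
+ */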
+static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ bool enable)
+{
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ if (enable) {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
+ req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
+ } else {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
+ req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_UPHY;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ return tegra_bpmp_transfer(pcie->bpmp, &msg);
+}
+
+static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
+ struct pci_bus *child, *root_bus = NULL;
+ struct pci_dev *pdev;
+
+ /*
+ * With some endpoints, the link doesn't enter the L2 state unless
+ * they are in the D0 state. So make sure that the immediate
+ * downstream devices are in D0 before sending PME_TurnOff to put the
+ * link into L2.
+ * This is per PCI Express Base r4.0 v1.0, September 27 2017,
+ * sec 5.2 Link State Power Management (page 428).
+ */
+
+ list_for_each_entry(child, &pp->bridge->bus->children, node) {
+ /* Find the bus immediately downstream of the root port */
+ if (child->parent == pp->bridge->bus) {
+ root_bus = child;
+ break;
+ }
+ }
+
+ if (!root_bus) {
+ dev_err(pcie->dev, "Failed to find downstream devices\n");
+ return;
+ }
+
+ list_for_each_entry(pdev, &root_bus->devices, bus_list) {
+ if (PCI_SLOT(pdev->devfn) == 0) {
+ if (pci_set_power_state(pdev, PCI_D0))
+ dev_err(pcie->dev,
+ "Failed to transition %s to D0 state\n",
+ dev_name(&pdev->dev));
+ }
+ }
+}
+
+static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+ pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
+ if (IS_ERR(pcie->slot_ctl_3v3)) {
+ if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
+ return PTR_ERR(pcie->slot_ctl_3v3);
+
+ pcie->slot_ctl_3v3 = NULL;
+ }
+
+ pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
+ if (IS_ERR(pcie->slot_ctl_12v)) {
+ if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
+ return PTR_ERR(pcie->slot_ctl_12v);
+
+ pcie->slot_ctl_12v = NULL;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+ int ret;
+
+ if (pcie->slot_ctl_3v3) {
+ ret = regulator_enable(pcie->slot_ctl_3v3);
+ if (ret < 0) {
+ dev_err(pcie->dev,
+ "Failed to enable 3.3V slot supply: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pcie->slot_ctl_12v) {
+ ret = regulator_enable(pcie->slot_ctl_12v);
+ if (ret < 0) {
+ dev_err(pcie->dev,
+ "Failed to enable 12V slot supply: %d\n", ret);
+ goto fail_12v_enable;
+ }
+ }
+
+ /*
+ * According to PCI Express Card Electromechanical Specification
+ * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
+ * should be a minimum of 100ms.
+ */
+ if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
+ msleep(100);
+
+ return 0;
+
+fail_12v_enable:
+ if (pcie->slot_ctl_3v3)
+ regulator_disable(pcie->slot_ctl_3v3);
+ return ret;
+}
+
+static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+ if (pcie->slot_ctl_12v)
+ regulator_disable(pcie->slot_ctl_12v);
+ if (pcie->slot_ctl_3v3)
+ regulator_disable(pcie->slot_ctl_3v3);
+}
+
+static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
+ bool en_hw_hot_rst)
+{
+ int ret;
+ u32 val;
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev,
+ "Failed to enable controller %u: %d\n", pcie->cid, ret);
+ return ret;
+ }
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
+ goto fail_pll_init;
+ }
+ }
+
+ ret = tegra_pcie_enable_slot_regulators(pcie);
+ if (ret < 0)
+ goto fail_slot_reg_en;
+
+ ret = regulator_enable(pcie->pex_ctl_supply);
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
+ goto fail_reg_en;
+ }
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
+ goto fail_core_clk;
+ }
+
+ ret = reset_control_deassert(pcie->core_apb_rst);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
+ ret);
+ goto fail_core_apb_rst;
+ }
+
+ if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
+ /* Enable HW_HOT_RST mode */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
+
+ ret = tegra_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
+ goto fail_phy;
+ }
+
+ /* Update CFG base address */
+ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+ APPL_CFG_BASE_ADDR);
+
+ /* Configure this core for RP mode operation */
+ appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);
+
+ appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);
+
+ val = appl_readl(pcie, APPL_CFG_MISC);
+ val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+ appl_writel(pcie, val, APPL_CFG_MISC);
+
+ if (pcie->enable_srns || pcie->enable_ext_refclk) {
+ /*
+ * When the Tegra PCIe RP is using an external clock, it cannot
+ * supply the same clock to its downstream hierarchy. Hence, gate
+ * the PCIe RP REFCLK output pads when the RP & EP are using
+ * separate clocks or the RP is using an external REFCLK.
+ */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+ }
+
+ if (!pcie->supports_clkreq) {
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+ }
+
+ /* Update iATU_DMA base address */
+ appl_writel(pcie,
+ pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+ APPL_CFG_IATU_DMA_BASE_ADDR);
+
+ reset_control_deassert(pcie->core_rst);
+
+ return ret;
+
+fail_phy:
+ reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+ clk_disable_unprepare(pcie->core_clk);
+fail_core_clk:
+ regulator_disable(pcie->pex_ctl_supply);
+fail_reg_en:
+ tegra_pcie_disable_slot_regulators(pcie);
+fail_slot_reg_en:
+ if (pcie->enable_ext_refclk)
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
+ tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+
+ return ret;
+}
+
+static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
+{
+ int ret;
+
+ ret = reset_control_assert(pcie->core_rst);
+ if (ret)
+ dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);
+
+ tegra_pcie_disable_phy(pcie);
+
+ ret = reset_control_assert(pcie->core_apb_rst);
+ if (ret)
+ dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
+
+ clk_disable_unprepare(pcie->core_clk);
+
+ ret = regulator_disable(pcie->pex_ctl_supply);
+ if (ret)
+ dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
+
+ tegra_pcie_disable_slot_regulators(pcie);
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
+ }
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
+ pcie->cid, ret);
+}
+
+static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ int ret;
+
+ ret = tegra_pcie_config_controller(pcie, false);
+ if (ret < 0)
+ return ret;
+
+ pp->ops = &tegra_pcie_dw_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
+ goto fail_host_init;
+ }
+
+ return 0;
+
+fail_host_init:
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
+}
+
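+/*
+ * Transmit PME_Turn_Off and poll for the link to settle in the L2 state.
+ * Returns 0 on success, or immediately if the link is already down.
+ */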
+static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ if (!tegra_pcie_dw_link_up(&pcie->pci))
+ return 0;
+
+ val = appl_readl(pcie, APPL_RADM_STATUS);
+ val |= APPL_PM_XMT_TURNOFF_STATE;
+ appl_writel(pcie, val, APPL_RADM_STATUS);
+
+ return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
+ val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
+ 1, PME_ACK_TIMEOUT);
+}
+
+static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
+{
+ u32 data;
+ int err;
+
+ if (!tegra_pcie_dw_link_up(&pcie->pci)) {
+ dev_dbg(pcie->dev, "PCIe link is not up...!\n");
+ return;
+ }
+
+ /*
+ * The PCIe controller exits L2 only when a reset is applied, so
+ * once in L2 it no longer handles interrupts. But if L2 entry
+ * fails, PERST# is asserted, which can trigger a surprise link
+ * down AER. Since this function is called from suspend_noirq(),
+ * that AER interrupt would never be processed. Disable all
+ * interrupts to avoid such a scenario.
+ */
+ appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
+
+ if (tegra_pcie_try_link_l2(pcie)) {
+ dev_info(pcie->dev, "Link didn't transition to L2 state\n");
+ /*
+ * The TX lane clock frequency resets to Gen1 only when the link
+ * is in the L2 or Detect state. So assert PERST# to the endpoint
+ * to force the root port into the Detect state.
+ */
+ data = appl_readl(pcie, APPL_PINMUX);
+ data &= ~APPL_PINMUX_PEX_RST;
+ appl_writel(pcie, data, APPL_PINMUX);
+
+ /*
+ * Some cards do not reach the Detect state even after PERST# is
+ * asserted. So disable the LTSSM to bring the link to the Detect
+ * state.
+ */
+ data = readl(pcie->appl_base + APPL_CTRL);
+ data &= ~APPL_CTRL_LTSSM_EN;
+ writel(data, pcie->appl_base + APPL_CTRL);
+
+ err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
+ data,
+ ((data &
+ APPL_DEBUG_LTSSM_STATE_MASK) >>
+ APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+ LTSSM_STATE_PRE_DETECT,
+ 1, LTSSM_TIMEOUT);
+ if (err)
+ dev_info(pcie->dev, "Link didn't go to detect state\n");
+ }
+ /*
+ * DBI registers may not be accessible after this, as PLL-E may be
+ * down depending on how CLKREQ is pulled by the endpoint.
+ */
+ data = appl_readl(pcie, APPL_PINMUX);
+ data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
+ /* Cut REFCLK to slot */
+ data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, data, APPL_PINMUX);
+}
+
+static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
+{
+ tegra_pcie_downstream_dev_to_D0(pcie);
+ dw_pcie_host_deinit(&pcie->pci.pp);
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+}
+
+static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
+{
+ struct device *dev = pcie->dev;
+ char *name;
+ int ret;
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ ret);
+ goto fail_pm_get_sync;
+ }
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
+ goto fail_pm_get_sync;
+ }
+
+ ret = tegra_pcie_init_controller(pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize controller: %d\n", ret);
+ goto fail_pm_get_sync;
+ }
+
+ pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
+ if (!pcie->link_state) {
+ ret = -ENOMEDIUM;
+ goto fail_host_init;
+ }
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name) {
+ ret = -ENOMEM;
+ goto fail_host_init;
+ }
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+ init_debugfs(pcie);
+
+ return ret;
+
+fail_host_init:
+ tegra_pcie_deinit_controller(pcie);
+fail_pm_get_sync:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_DISABLED)
+ return;
+
+ /* Disable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
+ APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+ LTSSM_STATE_PRE_DETECT,
+ 1, LTSSM_TIMEOUT);
+ if (ret)
+ dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
+
+ reset_control_assert(pcie->core_rst);
+
+ tegra_pcie_disable_phy(pcie);
+
+ reset_control_assert(pcie->core_apb_rst);
+
+ clk_disable_unprepare(pcie->core_clk);
+
+ pm_runtime_put_sync(pcie->dev);
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n",
+ ret);
+ }
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
+ pcie->cid, ret);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+ dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
+}
+
+static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+ struct device *dev = pcie->dev;
+ u32 val;
+ int ret;
+ u16 val_16;
+
+ if (pcie->ep_state == EP_STATE_ENABLED)
+ return;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ ret);
+ return;
+ }
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
+ pcie->cid, ret);
+ goto fail_set_ctrl_state;
+ }
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
+ ret);
+ goto fail_pll_init;
+ }
+ }
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable core clock: %d\n", ret);
+ goto fail_core_clk_enable;
+ }
+
+ ret = reset_control_deassert(pcie->core_apb_rst);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
+ goto fail_core_apb_rst;
+ }
+
+ ret = tegra_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable PHY: %d\n", ret);
+ goto fail_phy;
+ }
+
+ /* Clear any stale interrupt statuses */
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+ /* configure this core for EP mode operation */
+ val = appl_readl(pcie, APPL_DM_TYPE);
+ val &= ~APPL_DM_TYPE_MASK;
+ val |= APPL_DM_TYPE_EP;
+ appl_writel(pcie, val, APPL_DM_TYPE);
+
+ appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_SYS_PRE_DET_STATE;
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ val = appl_readl(pcie, APPL_CFG_MISC);
+ val |= APPL_CFG_MISC_SLV_EP_MODE;
+ val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+ appl_writel(pcie, val, APPL_CFG_MISC);
+
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+ APPL_CFG_BASE_ADDR);
+
+ appl_writel(pcie, pcie->atu_dma_res->start &
+ APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+ APPL_CFG_IATU_DMA_BASE_ADDR);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+ val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
+ val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+
+ reset_control_deassert(pcie->core_rst);
+
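+ /*
+ * Optional fixup flagged via DT: tweak the ACK/NAK timer so that
+ * UpdateFC packets are expected to go out frequently enough on
+ * boards that need it.
+ */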
+ if (pcie->update_fc_fixup) {
+ val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+ val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+ dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+ }
+
+ config_gen3_gen4_eq_presets(pcie);
+
+ init_host_aspm(pcie);
+
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
+
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
+ clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
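+ /*
+ * Point the MSI-X address-match window at the DWC core's MSI memory;
+ * writes to it (see tegra_pcie_ep_raise_msix_irq()) are then turned
+ * into MSI-X messages by the controller.
+ */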
+ val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
+ val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
+ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
+
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto fail_init_complete;
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
+ /* Program the private control to allow sending LTR upstream */
+ if (pcie->of_data->has_ltr_req_fix) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+ }
+
+ /* Enable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ pcie->ep_state = EP_STATE_ENABLED;
+ dev_dbg(dev, "Initialization of endpoint is completed\n");
+
+ return;
+
+fail_init_complete:
+ reset_control_assert(pcie->core_rst);
+ tegra_pcie_disable_phy(pcie);
+fail_phy:
+ reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+ clk_disable_unprepare(pcie->core_clk);
+fail_core_clk_enable:
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
+ tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+fail_set_ctrl_state:
+ pm_runtime_put_sync(dev);
+}
+
+static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+
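+ /* gpiod_get_value() returns the logical value: 1 means PERST# is asserted */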
+ if (gpiod_get_value(pcie->pex_rst_gpiod))
+ pex_ep_event_pex_rst_assert(pcie);
+ else
+ pex_ep_event_pex_rst_deassert(pcie);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ /* Tegra194 supports only INTA */
+ if (irq > 1)
+ return -EINVAL;
+
+ appl_writel(pcie, 1, APPL_LEGACY_INTX);
+ usleep_range(1000, 2000);
+ appl_writel(pcie, 0, APPL_LEGACY_INTX);
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ if (unlikely(irq > 31))
+ return -EINVAL;
+
+ appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+
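+ /* the controller matches this write against MSIX_ADDR_MATCH and fires MSI-X "irq" */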
+ writel(irq, ep->msi_mem);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSI:
+ return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSIX:
+ return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
+
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features tegra_pcie_epc_features = {
+ .linkup_notifier = true,
+ .core_init_notifier = true,
+ .msi_capable = false,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[0] = SZ_1M,
+};
+
+static const struct pci_epc_features*
+tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &tegra_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .raise_irq = tegra_pcie_ep_raise_irq,
+ .get_features = tegra_pcie_ep_get_features,
+};
+
+static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pcie->dev;
+ struct dw_pcie_ep *ep;
+ char *name;
+ int ret;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ ep->page_size = SZ_64K;
+
+ ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = gpiod_to_irq(pcie->pex_rst_gpiod);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
+ return ret;
+ }
+ pcie->pex_rst_irq = (unsigned int)ret;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
+ pcie->cid);
+ if (!name) {
+ dev_err(dev, "Failed to create PERST IRQ string\n");
+ return -ENOMEM;
+ }
+
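+ /*
+ * Keep the PERST# IRQ disabled for now (IRQ_NOAUTOEN); it is expected
+ * to be enabled only once the endpoint is started.
+ */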
+ irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+
+ ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
+ tegra_pcie_ep_pex_rst_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ name, (void *)pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
+ ret);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_dw_probe(struct platform_device *pdev)
+{
+ const struct tegra_pcie_dw_of_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *atu_dma_res;
+ struct tegra_pcie_dw *pcie;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ struct phy **phys;
+ char *name;
+ int ret;
+ u32 i;
+
+ data = of_device_get_match_data(dev);
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = &pdev->dev;
+ pci->ops = &tegra_dw_pcie_ops;
+ pcie->dev = &pdev->dev;
+ pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
+ pci->n_fts[0] = pcie->of_data->n_fts[0];
+ pci->n_fts[1] = pcie->of_data->n_fts[1];
+ pp = &pci->pp;
+ pp->num_vectors = MAX_MSI_IRQS;
+
+ ret = tegra_pcie_dw_parse_dt(pcie);
+ if (ret < 0) {
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to parse device tree: %d\n"),
+ ret);
+ return ret;
+ }
+
+ ret = tegra_pcie_get_slot_regulators(pcie);
+ if (ret < 0) {
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to get slot regulators: %d\n"),
+ ret);
+ return ret;
+ }
+
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
+
+ pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
+ if (IS_ERR(pcie->pex_ctl_supply)) {
+ ret = PTR_ERR(pcie->pex_ctl_supply);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get regulator: %ld\n",
+ PTR_ERR(pcie->pex_ctl_supply));
+ return ret;
+ }
+
+ pcie->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(pcie->core_clk)) {
+ dev_err(dev, "Failed to get core clock: %ld\n",
+ PTR_ERR(pcie->core_clk));
+ return PTR_ERR(pcie->core_clk);
+ }
+
+ pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "appl");
+ if (!pcie->appl_res) {
+ dev_err(dev, "Failed to find \"appl\" region\n");
+ return -ENODEV;
+ }
+
+ pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
+ if (IS_ERR(pcie->appl_base))
+ return PTR_ERR(pcie->appl_base);
+
+ pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(pcie->core_apb_rst)) {
+ dev_err(dev, "Failed to get APB reset: %ld\n",
+ PTR_ERR(pcie->core_apb_rst));
+ return PTR_ERR(pcie->core_apb_rst);
+ }
+
+ phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
+ if (!phys)
+ return -ENOMEM;
+
+ for (i = 0; i < pcie->phy_count; i++) {
+ name = kasprintf(GFP_KERNEL, "p2u-%u", i);
+ if (!name) {
+ dev_err(dev, "Failed to create P2U string\n");
+ return -ENOMEM;
+ }
+ phys[i] = devm_phy_get(dev, name);
+ kfree(name);
+ if (IS_ERR(phys[i])) {
+ ret = PTR_ERR(phys[i]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get PHY: %d\n", ret);
+ return ret;
+ }
+ }
+
+ pcie->phys = phys;
+
+ atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "atu_dma");
+ if (!atu_dma_res) {
+ dev_err(dev, "Failed to find \"atu_dma\" region\n");
+ return -ENODEV;
+ }
+ pcie->atu_dma_res = atu_dma_res;
+
+ pci->atu_size = resource_size(atu_dma_res);
+ pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
+ if (IS_ERR(pci->atu_base))
+ return PTR_ERR(pci->atu_base);
+
+ pcie->core_rst = devm_reset_control_get(dev, "core");
+ if (IS_ERR(pcie->core_rst)) {
+ dev_err(dev, "Failed to get core reset: %ld\n",
+ PTR_ERR(pcie->core_rst));
+ return PTR_ERR(pcie->core_rst);
+ }
+
+ pp->irq = platform_get_irq_byname(pdev, "intr");
+ if (pp->irq < 0)
+ return pp->irq;
+
+ pcie->bpmp = tegra_bpmp_get(dev);
+ if (IS_ERR(pcie->bpmp))
+ return PTR_ERR(pcie->bpmp);
+
+ platform_set_drvdata(pdev, pcie);
+
+ pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
+ ret = PTR_ERR_OR_ZERO(pcie->icc_path);
+ if (ret) {
+ tegra_bpmp_put(pcie->bpmp);
+ dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
+ return ret;
+ }
+
+ switch (pcie->of_data->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
+ IRQF_SHARED, "tegra-pcie-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_rp(pcie);
+ if (ret && ret != -ENOMEDIUM)
+ goto fail;
+ else
+ return 0;
+ break;
+
+ case DW_PCIE_EP_TYPE:
+ ret = devm_request_threaded_irq(dev, pp->irq,
+ tegra_pcie_ep_hard_irq,
+ tegra_pcie_ep_irq_thread,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "tegra-pcie-ep-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_ep(pcie, pdev);
+ if (ret < 0)
+ goto fail;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PCIe device type %d\n",
+ pcie->of_data->mode);
+ ret = -EINVAL;
+ }
+
+fail:
+ tegra_bpmp_put(pcie->bpmp);
+ return ret;
+}
+
+static void tegra_pcie_dw_remove(struct platform_device *pdev)
+{
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return;
+
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_deinit_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
+
+ pm_runtime_disable(pcie->dev);
+ tegra_bpmp_put(pcie->bpmp);
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
+}
+
+static int tegra_pcie_dw_suspend_late(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ u32 val;
+
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n");
+ return -EPERM;
+ }
+
+ if (!pcie->link_state)
+ return 0;
+
+ /* Enable HW_HOT_RST mode */
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_dw_suspend_noirq(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+
+ if (!pcie->link_state)
+ return 0;
+
+ tegra_pcie_downstream_dev_to_D0(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+
+ return 0;
+}
+
+static int tegra_pcie_dw_resume_noirq(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pcie->link_state)
+ return 0;
+
+ ret = tegra_pcie_config_controller(pcie, true);
+ if (ret < 0)
+ return ret;
+
+ ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init host: %d\n", ret);
+ goto fail_host_init;
+ }
+
+ dw_pcie_setup_rc(&pcie->pci.pp);
+
+ ret = tegra_pcie_dw_start_link(&pcie->pci);
+ if (ret < 0)
+ goto fail_host_init;
+
+ return 0;
+
+fail_host_init:
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
+}
+
+static int tegra_pcie_dw_resume_early(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ u32 val;
+
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ dev_err(dev, "Suspend is not supported in EP mode");
+ return -ENOTSUPP;
+ }
+
+ if (!pcie->link_state)
+ return 0;
+
+ /* Disable HW_HOT_RST mode */
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
+ val &= ~APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
+
+ return 0;
+}
+
+static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
+{
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return;
+
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_downstream_dev_to_D0(pcie);
+
+ disable_irq(pcie->pci.pp.irq);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ disable_irq(pcie->pci.pp.msi_irq[0]);
+
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
+}
+
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_RC_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_EP_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
+ .mode = DW_PCIE_RC_TYPE,
+ .has_msix_doorbell_access_fix = true,
+ .has_sbr_reset_fix = true,
+ .has_l1ss_exit_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
+ .mode = DW_PCIE_EP_TYPE,
+ .has_l1ss_exit_fix = true,
+ .has_ltr_req_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
+};
+
+static const struct of_device_id tegra_pcie_dw_of_match[] = {
+ {
+ .compatible = "nvidia,tegra194-pcie",
+ .data = &tegra194_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra194-pcie-ep",
+ .data = &tegra194_pcie_dw_ep_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie",
+ .data = &tegra234_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie-ep",
+ .data = &tegra234_pcie_dw_ep_of_data,
+ },
+ {}
+};
+
+static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
+ .suspend_late = tegra_pcie_dw_suspend_late,
+ .suspend_noirq = tegra_pcie_dw_suspend_noirq,
+ .resume_noirq = tegra_pcie_dw_resume_noirq,
+ .resume_early = tegra_pcie_dw_resume_early,
+};
+
+static struct platform_driver tegra_pcie_dw_driver = {
+ .probe = tegra_pcie_dw_probe,
+ .remove_new = tegra_pcie_dw_remove,
+ .shutdown = tegra_pcie_dw_shutdown,
+ .driver = {
+ .name = "tegra194-pcie",
+ .pm = &tegra_pcie_dw_pm_ops,
+ .of_match_table = tegra_pcie_dw_of_match,
+ },
+};
+module_platform_driver(tegra_pcie_dw_driver);
+
+MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
+
+MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
new file mode 100644
index 000000000..cba3c88fc
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe endpoint controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/* Link Glue registers */
+#define PCL_RSTCTRL0 0x0010
+#define PCL_RSTCTRL_AXI_REG BIT(3)
+#define PCL_RSTCTRL_AXI_SLAVE BIT(2)
+#define PCL_RSTCTRL_AXI_MASTER BIT(1)
+#define PCL_RSTCTRL_PIPE3 BIT(0)
+
+#define PCL_RSTCTRL1 0x0020
+#define PCL_RSTCTRL_PERST BIT(0)
+
+#define PCL_RSTCTRL2 0x0024
+#define PCL_RSTCTRL_PHY_RESET BIT(0)
+
+#define PCL_PINCTRL0 0x002c
+#define PCL_PERST_PLDN_REGEN BIT(12)
+#define PCL_PERST_NOE_REGEN BIT(11)
+#define PCL_PERST_OUT_REGEN BIT(8)
+#define PCL_PERST_PLDN_REGVAL BIT(4)
+#define PCL_PERST_NOE_REGVAL BIT(3)
+#define PCL_PERST_OUT_REGVAL BIT(0)
+
+#define PCL_PIPEMON 0x0044
+#define PCL_PCLK_ALIVE BIT(15)
+
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
+#define PCL_APP_CLK_CTRL 0x8004
+#define PCL_APP_CLK_REQ BIT(0)
+
+#define PCL_APP_READY_CTRL 0x8008
+#define PCL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCL_APP_MSI0 0x8040
+#define PCL_APP_VEN_MSI_TC_MASK GENMASK(10, 8)
+#define PCL_APP_VEN_MSI_VECTOR_MASK GENMASK(4, 0)
+
+#define PCL_APP_MSI1 0x8044
+#define PCL_APP_MSI_REQ BIT(0)
+
+#define PCL_APP_INTX 0x8074
+#define PCL_APP_INTX_SYS_INT BIT(0)
+
+#define PCL_APP_PM0 0x8078
+#define PCL_SYS_AUX_PWR_DET BIT(8)
+
+/* assertion time of INTx in usec */
+#define PCL_INTX_WIDTH_USEC 30
+
+struct uniphier_pcie_ep_priv {
+ void __iomem *base;
+ struct dw_pcie pci;
+ struct clk *clk, *clk_gio;
+ struct reset_control *rst, *rst_gio;
+ struct phy *phy;
+ const struct uniphier_pcie_ep_soc_data *data;
+};
+
+struct uniphier_pcie_ep_soc_data {
+ bool has_gio;
+ void (*init)(struct uniphier_pcie_ep_priv *priv);
+ int (*wait)(struct uniphier_pcie_ep_priv *priv);
+ const struct pci_epc_features features;
+};
+
+#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_ep_priv *priv,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_APP_READY_CTRL);
+ if (enable)
+ val |= PCL_APP_LTSSM_ENABLE;
+ else
+ val &= ~PCL_APP_LTSSM_ENABLE;
+ writel(val, priv->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv,
+ bool assert)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_RSTCTRL2);
+ if (assert)
+ val |= PCL_RSTCTRL_PHY_RESET;
+ else
+ val &= ~PCL_RSTCTRL_PHY_RESET;
+ writel(val, priv->base + PCL_RSTCTRL2);
+}
+
+static void uniphier_pcie_pro5_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 val;
+
+ /* set EP mode */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
+ /* clock request */
+ val = readl(priv->base + PCL_APP_CLK_CTRL);
+ val &= ~PCL_APP_CLK_REQ;
+ writel(val, priv->base + PCL_APP_CLK_CTRL);
+
+ /* deassert PIPE3 and AXI reset */
+ val = readl(priv->base + PCL_RSTCTRL0);
+ val |= PCL_RSTCTRL_AXI_REG | PCL_RSTCTRL_AXI_SLAVE
+ | PCL_RSTCTRL_AXI_MASTER | PCL_RSTCTRL_PIPE3;
+ writel(val, priv->base + PCL_RSTCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
+ msleep(100);
+}
+
+static void uniphier_pcie_nx1_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 val;
+
+ /* set EP mode */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
+ /* use auxiliary power detection */
+ val = readl(priv->base + PCL_APP_PM0);
+ val |= PCL_SYS_AUX_PWR_DET;
+ writel(val, priv->base + PCL_APP_PM0);
+
+ /* assert PERST# */
+ val = readl(priv->base + PCL_PINCTRL0);
+ val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+ | PCL_PERST_PLDN_REGVAL);
+ val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+ | PCL_PERST_PLDN_REGEN;
+ writel(val, priv->base + PCL_PINCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
+ usleep_range(100000, 200000);
+
+ /* deassert PERST# */
+ val = readl(priv->base + PCL_PINCTRL0);
+ val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+ writel(val, priv->base + PCL_PINCTRL0);
+}
+
+static int uniphier_pcie_nx1_wait_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 status;
+ int ret;
+
+ /* wait PIPE clock */
+ ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+ status & PCL_PCLK_ALIVE, 100000, 1000000);
+ if (ret) {
+ dev_err(priv->pci.dev,
+ "Failed to initialize controller in EP mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, true);
+
+ return 0;
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+}
+
+static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ /*
+ * This generates a pulse signal to send INTx to the RC, so it should
+ * be cleared as soon as possible. The sequence is serialized by the
+ * mutex taken in pci_epc_raise_irq().
+ */
+ /* assert INTx */
+ val = readl(priv->base + PCL_APP_INTX);
+ val |= PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ udelay(PCL_INTX_WIDTH_USEC);
+
+ /* deassert INTx */
+ val &= ~PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
+ u8 func_no, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
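+ /* set up the vendor MSI parameters, then pulse the request bit */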
+ val = FIELD_PREP(PCL_APP_VEN_MSI_TC_MASK, func_no)
+ | FIELD_PREP(PCL_APP_VEN_MSI_VECTOR_MASK, interrupt_num - 1);
+ writel(val, priv->base + PCL_APP_MSI0);
+
+ val = readl(priv->base + PCL_APP_MSI1);
+ val |= PCL_APP_MSI_REQ;
+ writel(val, priv->base + PCL_APP_MSI1);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return uniphier_pcie_ep_raise_legacy_irq(ep);
+ case PCI_EPC_IRQ_MSI:
+ return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
+ interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type (%d)\n", type);
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features*
+uniphier_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ return &priv->data->features;
+}
+
+static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
+ .ep_init = uniphier_pcie_ep_init,
+ .raise_irq = uniphier_pcie_ep_raise_irq,
+ .get_features = uniphier_pcie_get_features,
+};
+
+static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk_gio);
+ if (ret)
+ goto out_clk_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ if (priv->data->init)
+ priv->data->init(priv);
+
+ uniphier_pcie_phy_reset(priv, true);
+
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto out_rst_gio_assert;
+
+ uniphier_pcie_phy_reset(priv, false);
+
+ if (priv->data->wait) {
+ ret = priv->data->wait(priv);
+ if (ret)
+ goto out_phy_exit;
+ }
+
+ return 0;
+
+out_phy_exit:
+ phy_exit(priv->phy);
+out_rst_gio_assert:
+ reset_control_assert(priv->rst_gio);
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_gio);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = uniphier_pcie_start_link,
+ .stop_link = uniphier_pcie_stop_link,
+};
+
+static int uniphier_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_pcie_ep_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data))
+ return -EINVAL;
+
+ priv->pci.dev = dev;
+ priv->pci.ops = &dw_pcie_ops;
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ if (priv->data->has_gio) {
+ priv->clk_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_gio))
+ return PTR_ERR(priv->clk_gio);
+
+ priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_gio))
+ return PTR_ERR(priv->rst_gio);
+ }
+
+ priv->clk = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ priv->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ dev_err(dev, "Failed to get phy (%d)\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = uniphier_pcie_ep_enable(priv);
+ if (ret)
+ return ret;
+
+ priv->pci.ep.ops = &uniphier_pcie_ep_ops;
+ return dw_pcie_ep_init(&priv->pci.ep);
+}
+
+static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
+ .has_gio = true,
+ .init = uniphier_pcie_pro5_init_ep,
+ .wait = NULL,
+ .features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 16,
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .reserved_bar = BIT(BAR_4),
+ },
+};
+
+static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
+ .has_gio = false,
+ .init = uniphier_pcie_nx1_init_ep,
+ .wait = uniphier_pcie_nx1_wait_ep,
+ .features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 12,
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ },
+};
+
+static const struct of_device_id uniphier_pcie_ep_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro5-pcie-ep",
+ .data = &uniphier_pro5_data,
+ },
+ {
+ .compatible = "socionext,uniphier-nx1-pcie-ep",
+ .data = &uniphier_nx1_data,
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver uniphier_pcie_ep_driver = {
+ .probe = uniphier_pcie_ep_probe,
+ .driver = {
+ .name = "uniphier-pcie-ep",
+ .of_match_table = uniphier_pcie_ep_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(uniphier_pcie_ep_driver);
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
new file mode 100644
index 000000000..48c3eba81
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define PCL_PINCTRL0 0x002c
+#define PCL_PERST_PLDN_REGEN BIT(12)
+#define PCL_PERST_NOE_REGEN BIT(11)
+#define PCL_PERST_OUT_REGEN BIT(8)
+#define PCL_PERST_PLDN_REGVAL BIT(4)
+#define PCL_PERST_NOE_REGVAL BIT(3)
+#define PCL_PERST_OUT_REGVAL BIT(0)
+
+#define PCL_PIPEMON 0x0044
+#define PCL_PCLK_ALIVE BIT(15)
+
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
+#define PCL_APP_READY_CTRL 0x8008
+#define PCL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCL_APP_PM0 0x8078
+#define PCL_SYS_AUX_PWR_DET BIT(8)
+
+#define PCL_RCV_INT 0x8108
+#define PCL_RCV_INT_ALL_ENABLE GENMASK(20, 17)
+#define PCL_CFG_BW_MGT_STATUS BIT(4)
+#define PCL_CFG_LINK_AUTO_BW_STATUS BIT(3)
+#define PCL_CFG_AER_RC_ERR_MSI_STATUS BIT(2)
+#define PCL_CFG_PME_MSI_STATUS BIT(1)
+
+#define PCL_RCV_INTX 0x810c
+#define PCL_RCV_INTX_ALL_ENABLE GENMASK(19, 16)
+#define PCL_RCV_INTX_ALL_MASK GENMASK(11, 8)
+#define PCL_RCV_INTX_MASK_SHIFT 8
+#define PCL_RCV_INTX_ALL_STATUS GENMASK(3, 0)
+#define PCL_RCV_INTX_STATUS_SHIFT 0
+
+#define PCL_STATUS_LINK 0x8140
+#define PCL_RDLH_LINK_UP BIT(1)
+#define PCL_XMLH_LINK_UP BIT(0)
+
+struct uniphier_pcie {
+ struct dw_pcie pci;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct irq_domain *legacy_irq_domain;
+};
+
+#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie *pcie,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(pcie->base + PCL_APP_READY_CTRL);
+ if (enable)
+ val |= PCL_APP_LTSSM_ENABLE;
+ else
+ val &= ~PCL_APP_LTSSM_ENABLE;
+ writel(val, pcie->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_init_rc(struct uniphier_pcie *pcie)
+{
+ u32 val;
+
+ /* set RC MODE */
+ val = readl(pcie->base + PCL_MODE);
+ val |= PCL_MODE_REGEN;
+ val &= ~PCL_MODE_REGVAL;
+ writel(val, pcie->base + PCL_MODE);
+
+ /* use auxiliary power detection */
+ val = readl(pcie->base + PCL_APP_PM0);
+ val |= PCL_SYS_AUX_PWR_DET;
+ writel(val, pcie->base + PCL_APP_PM0);
+
+ /* assert PERST# */
+ val = readl(pcie->base + PCL_PINCTRL0);
+ val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+ | PCL_PERST_PLDN_REGVAL);
+ val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+ | PCL_PERST_PLDN_REGEN;
+ writel(val, pcie->base + PCL_PINCTRL0);
+
+ uniphier_pcie_ltssm_enable(pcie, false);
+
+ usleep_range(100000, 200000);
+
+ /* deassert PERST# */
+ val = readl(pcie->base + PCL_PINCTRL0);
+ val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+ writel(val, pcie->base + PCL_PINCTRL0);
+}
+
+static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie)
+{
+ u32 status;
+ int ret;
+
+ /* wait PIPE clock */
+ ret = readl_poll_timeout(pcie->base + PCL_PIPEMON, status,
+ status & PCL_PCLK_ALIVE, 100000, 1000000);
+ if (ret) {
+ dev_err(pcie->pci.dev,
+ "Failed to initialize controller in RC mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uniphier_pcie_link_up(struct dw_pcie *pci)
+{
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ u32 val, mask;
+
+ val = readl(pcie->base + PCL_STATUS_LINK);
+ mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;
+
+ return (val & mask) == mask;
+}
+
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(pcie, true);
+
+ return 0;
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(pcie, false);
+}
+
+static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie)
+{
+ writel(PCL_RCV_INT_ALL_ENABLE, pcie->base + PCL_RCV_INT);
+ writel(PCL_RCV_INTX_ALL_ENABLE, pcie->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_mask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = readl(pcie->base + PCL_RCV_INTX);
+ val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
+ writel(val, pcie->base + PCL_RCV_INTX);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void uniphier_pcie_irq_unmask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = readl(pcie->base + PCL_RCV_INTX);
+ val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
+ writel(val, pcie->base + PCL_RCV_INTX);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static struct irq_chip uniphier_pcie_irq_chip = {
+ .name = "PCI",
+ .irq_mask = uniphier_pcie_irq_mask,
+ .irq_unmask = uniphier_pcie_irq_unmask,
+};
+
+static int uniphier_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &uniphier_pcie_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops uniphier_intx_domain_ops = {
+ .map = uniphier_pcie_intx_map,
+};
+
+static void uniphier_pcie_irq_handler(struct irq_desc *desc)
+{
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long reg;
+ u32 val, bit;
+
+ /* INT for debug */
+ val = readl(pcie->base + PCL_RCV_INT);
+
+ if (val & PCL_CFG_BW_MGT_STATUS)
+ dev_dbg(pci->dev, "Link Bandwidth Management Event\n");
+ if (val & PCL_CFG_LINK_AUTO_BW_STATUS)
+ dev_dbg(pci->dev, "Link Autonomous Bandwidth Event\n");
+ if (val & PCL_CFG_AER_RC_ERR_MSI_STATUS)
+ dev_dbg(pci->dev, "Root Error\n");
+ if (val & PCL_CFG_PME_MSI_STATUS)
+ dev_dbg(pci->dev, "PME Interrupt\n");
+
+ writel(val, pcie->base + PCL_RCV_INT);
+
+ /* INTx */
+ chained_irq_enter(chip, desc);
+
+ val = readl(pcie->base + PCL_RCV_INTX);
+ reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
+
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+
+ chained_irq_exit(chip, desc);
+}
+
+static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ struct device_node *np = pci->dev->of_node;
+ struct device_node *np_intc;
+ int ret = 0;
+
+ np_intc = of_get_child_by_name(np, "legacy-interrupt-controller");
+ if (!np_intc) {
+ dev_err(pci->dev, "Failed to get legacy-interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ pp->irq = irq_of_parse_and_map(np_intc, 0);
+ if (!pp->irq) {
+ dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n");
+ ret = -EINVAL;
+ goto out_put_node;
+ }
+
+ pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ &uniphier_intx_domain_ops, pp);
+ if (!pcie->legacy_irq_domain) {
+ dev_err(pci->dev, "Failed to get INTx domain\n");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
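+ /* demultiplex the muxed SoC interrupt through the INTx domain */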
+ irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
+ pp);
+
+out_put_node:
+ of_node_put(np_intc);
+ return ret;
+}
+
+static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ int ret;
+
+ ret = uniphier_pcie_config_legacy_irq(pp);
+ if (ret)
+ return ret;
+
+ uniphier_pcie_irq_enable(pcie);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
+ .host_init = uniphier_pcie_host_init,
+};
+
+static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie)
+{
+ int ret;
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(pcie->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ uniphier_pcie_init_rc(pcie);
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ goto out_rst_assert;
+
+ ret = uniphier_pcie_wait_rc(pcie);
+ if (ret)
+ goto out_phy_exit;
+
+ return 0;
+
+out_phy_exit:
+ phy_exit(pcie->phy);
+out_rst_assert:
+ reset_control_assert(pcie->rst);
+out_clk_disable:
+ clk_disable_unprepare(pcie->clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = uniphier_pcie_start_link,
+ .stop_link = uniphier_pcie_stop_link,
+ .link_up = uniphier_pcie_link_up,
+};
+
+static int uniphier_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pci.dev = dev;
+ pcie->pci.ops = &dw_pcie_ops;
+
+ pcie->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ pcie->rst = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(pcie->rst))
+ return PTR_ERR(pcie->rst);
+
+ pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = uniphier_pcie_host_enable(pcie);
+ if (ret)
+ return ret;
+
+ pcie->pci.pp.ops = &uniphier_pcie_host_ops;
+
+ return dw_pcie_host_init(&pcie->pci.pp);
+}
+
+static const struct of_device_id uniphier_pcie_match[] = {
+ { .compatible = "socionext,uniphier-pcie", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver uniphier_pcie_driver = {
+ .probe = uniphier_pcie_probe,
+ .driver = {
+ .name = "uniphier-pcie",
+ .of_match_table = uniphier_pcie_match,
+ },
+};
+builtin_platform_driver(uniphier_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
new file mode 100644
index 000000000..71026fefa
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DWC PCIe RC driver for Toshiba Visconti ARM SoC
+ *
+ * Copyright (C) 2021 Toshiba Electronic Device & Storage Corporation
+ * Copyright (C) 2021 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+#include "../../pci.h"
+
+struct visconti_pcie {
+ struct dw_pcie pci;
+ void __iomem *ulreg_base;
+ void __iomem *smu_base;
+ void __iomem *mpu_base;
+ struct clk *refclk;
+ struct clk *coreclk;
+ struct clk *auxclk;
+};
+
+#define PCIE_UL_REG_S_PCIE_MODE 0x00F4
+#define PCIE_UL_REG_S_PCIE_MODE_EP 0x00
+#define PCIE_UL_REG_S_PCIE_MODE_RC 0x04
+
+#define PCIE_UL_REG_S_PERSTN_CTRL 0x00F8
+#define PCIE_UL_IOM_PCIE_PERSTN_I_EN BIT(3)
+#define PCIE_UL_DIRECT_PERSTN_EN BIT(2)
+#define PCIE_UL_PERSTN_OUT BIT(1)
+#define PCIE_UL_DIRECT_PERSTN BIT(0)
+#define PCIE_UL_REG_S_PERSTN_CTRL_INIT (PCIE_UL_IOM_PCIE_PERSTN_I_EN | \
+ PCIE_UL_DIRECT_PERSTN_EN | \
+ PCIE_UL_DIRECT_PERSTN)
+
+#define PCIE_UL_REG_S_PHY_INIT_02 0x0104
+#define PCIE_UL_PHY0_SRAM_EXT_LD_DONE BIT(0)
+
+#define PCIE_UL_REG_S_PHY_INIT_03 0x0108
+#define PCIE_UL_PHY0_SRAM_INIT_DONE BIT(0)
+
+#define PCIE_UL_REG_S_INT_EVENT_MASK1 0x0138
+#define PCIE_UL_CFG_PME_INT BIT(0)
+#define PCIE_UL_CFG_LINK_EQ_REQ_INT BIT(1)
+#define PCIE_UL_EDMA_INT0 BIT(2)
+#define PCIE_UL_EDMA_INT1 BIT(3)
+#define PCIE_UL_EDMA_INT2 BIT(4)
+#define PCIE_UL_EDMA_INT3 BIT(5)
+#define PCIE_UL_S_INT_EVENT_MASK1_ALL (PCIE_UL_CFG_PME_INT | \
+ PCIE_UL_CFG_LINK_EQ_REQ_INT | \
+ PCIE_UL_EDMA_INT0 | \
+ PCIE_UL_EDMA_INT1 | \
+ PCIE_UL_EDMA_INT2 | \
+ PCIE_UL_EDMA_INT3)
+
+#define PCIE_UL_REG_S_SB_MON 0x0198
+#define PCIE_UL_REG_S_SIG_MON 0x019C
+#define PCIE_UL_CORE_RST_N_MON BIT(0)
+
+#define PCIE_UL_REG_V_SII_DBG_00 0x0844
+#define PCIE_UL_REG_V_SII_GEN_CTRL_01 0x0860
+#define PCIE_UL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_00 0x0864
+#define PCIE_UL_SMLH_LINK_UP BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_02 0x0868
+#define PCIE_UL_S_DETECT_ACT 0x01
+#define PCIE_UL_S_L0 0x11
+
+#define PISMU_CKON_PCIE 0x0038
+#define PISMU_CKON_PCIE_AUX_CLK BIT(1)
+#define PISMU_CKON_PCIE_MSTR_ACLK BIT(0)
+
+#define PISMU_RSOFF_PCIE 0x0538
+#define PISMU_RSOFF_PCIE_ULREG_RST_N BIT(1)
+#define PISMU_RSOFF_PCIE_PWR_UP_RST_N BIT(0)
+
+#define PCIE_MPU_REG_MP_EN 0x0
+#define MPU_MP_EN_DISABLE BIT(0)
+
+/* Access registers in PCIe ulreg */
+static void visconti_ulreg_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->ulreg_base + reg);
+}
+
+static u32 visconti_ulreg_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->ulreg_base + reg);
+}
+
+/* Access registers in PCIe smu */
+static void visconti_smu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->smu_base + reg);
+}
+
+/* Access registers in PCIe mpu */
+static void visconti_mpu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->mpu_base + reg);
+}
+
+static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->mpu_base + reg);
+}
+
+static int visconti_pcie_link_up(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
+
+ return !!(val & PCIE_UL_S_L0);
+}
+
+static int visconti_pcie_start_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val;
+ int ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_APP_LTSSM_ENABLE,
+ PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ ret = readl_relaxed_poll_timeout(addr + PCIE_UL_REG_V_PHY_ST_02,
+ val, (val & PCIE_UL_S_L0),
+ 90000, 100000);
+ if (ret)
+ return ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_S_INT_EVENT_MASK1_ALL,
+ PCIE_UL_REG_S_INT_EVENT_MASK1);
+
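+ /* with the link up, clear the MPU disable bit so accesses can pass through */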
+ if (dw_pcie_link_up(pci)) {
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val & ~MPU_MP_EN_DISABLE,
+ PCIE_MPU_REG_MP_EN);
+ }
+
+ return 0;
+}
+
+static void visconti_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = visconti_ulreg_readl(pcie, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+ val &= ~PCIE_UL_APP_LTSSM_ENABLE;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val | MPU_MP_EN_DISABLE, PCIE_MPU_REG_MP_EN);
+}
+
+/*
+ * Per the SoC specification, the CPU bus presents addresses to the
+ * PCIe bus as offsets from 0x40000000, so 0x40000000 is subtracted
+ * from the CPU bus address. This base also corresponds to io_base
+ * taken from the DT.
+ */
+static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
+{
+ struct dw_pcie_rp *pp = &pci->pp;
+
+ return cpu_addr & ~pp->io_base;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = visconti_pcie_cpu_addr_fixup,
+ .link_up = visconti_pcie_link_up,
+ .start_link = visconti_pcie_start_link,
+ .stop_link = visconti_pcie_stop_link,
+};
+
+static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr;
+ int err;
+ u32 val;
+
+ visconti_smu_writel(pcie,
+ PISMU_CKON_PCIE_AUX_CLK | PISMU_CKON_PCIE_MSTR_ACLK,
+ PISMU_CKON_PCIE);
+ ndelay(250);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_ULREG_RST_N,
+ PISMU_RSOFF_PCIE);
+ visconti_ulreg_writel(pcie, PCIE_UL_REG_S_PCIE_MODE_RC,
+ PCIE_UL_REG_S_PCIE_MODE);
+
+ val = PCIE_UL_REG_S_PERSTN_CTRL_INIT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ val |= PCIE_UL_PERSTN_OUT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_PWR_UP_RST_N,
+ PISMU_RSOFF_PCIE);
+
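+ /*
+ * PHY SRAM handshake: wait for the PHY to report SRAM initialization
+ * done, then acknowledge the SRAM (external) load as complete so
+ * bring-up can continue.
+ */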
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_PHY_INIT_03;
+ err = readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_PHY0_SRAM_INIT_DONE),
+ 100, 1000);
+ if (err)
+ return err;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_PHY0_SRAM_EXT_LD_DONE,
+ PCIE_UL_REG_S_PHY_INIT_02);
+
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_SIG_MON;
+ return readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_CORE_RST_N_MON), 100,
+ 1000);
+}
+
+static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
+ .host_init = visconti_pcie_host_init,
+};
+
+static int visconti_get_resources(struct platform_device *pdev,
+ struct visconti_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+
+ pcie->ulreg_base = devm_platform_ioremap_resource_byname(pdev, "ulreg");
+ if (IS_ERR(pcie->ulreg_base))
+ return PTR_ERR(pcie->ulreg_base);
+
+ pcie->smu_base = devm_platform_ioremap_resource_byname(pdev, "smu");
+ if (IS_ERR(pcie->smu_base))
+ return PTR_ERR(pcie->smu_base);
+
+ pcie->mpu_base = devm_platform_ioremap_resource_byname(pdev, "mpu");
+ if (IS_ERR(pcie->mpu_base))
+ return PTR_ERR(pcie->mpu_base);
+
+ pcie->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(pcie->refclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->refclk),
+ "Failed to get ref clock\n");
+
+ pcie->coreclk = devm_clk_get(dev, "core");
+ if (IS_ERR(pcie->coreclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->coreclk),
+ "Failed to get core clock\n");
+
+ pcie->auxclk = devm_clk_get(dev, "aux");
+ if (IS_ERR(pcie->auxclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->auxclk),
+ "Failed to get aux clock\n");
+
+ return 0;
+}
+
+static int visconti_add_pcie_port(struct visconti_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+
+ pp->irq = platform_get_irq_byname(pdev, "intr");
+ if (pp->irq < 0)
+ return pp->irq;
+
+ pp->ops = &visconti_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int visconti_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ ret = visconti_get_resources(pdev, pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return visconti_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id visconti_pcie_match[] = {
+ { .compatible = "toshiba,visconti-pcie" },
+ {},
+};
+
+static struct platform_driver visconti_pcie_driver = {
+ .probe = visconti_pcie_probe,
+ .driver = {
+ .name = "visconti-pcie",
+ .of_match_table = visconti_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(visconti_pcie_driver);
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
new file mode 100644
index 000000000..58ce034f7
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Mobiveil-based PCIe controllers"
+ depends on PCI
+
+config PCIE_MOBIVEIL
+ bool
+
+config PCIE_MOBIVEIL_HOST
+ bool
+ depends on PCI_MSI
+ select PCIE_MOBIVEIL
+
+config PCIE_LAYERSCAPE_GEN4
+ bool "Freescale Layerscape Gen4 PCIe controller"
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_MOBIVEIL_HOST
+ help
+ Say Y here if you want PCIe Gen4 controller support on
+ Layerscape SoCs.
+
+config PCIE_MOBIVEIL_PLAT
+ bool "Mobiveil AXI PCIe controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI
+ select PCIE_MOBIVEIL_HOST
+ help
+ Say Y here if you want to enable support for the Mobiveil AXI PCIe
+ Soft IP. It has up to 8 outbound and inbound windows
+ for address translation and it is a PCIe Gen4 IP.
+
+endmenu
diff --git a/drivers/pci/controller/mobiveil/Makefile b/drivers/pci/controller/mobiveil/Makefile
new file mode 100644
index 000000000..99d879de3
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
+obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
+obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
+obj-$(CONFIG_PCIE_LAYERSCAPE_GEN4) += pcie-layerscape-gen4.o
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
new file mode 100644
index 000000000..d7b7350f0
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe Gen4 host controller driver for NXP Layerscape SoCs
+ *
+ * Copyright 2019-2020 NXP
+ *
+ * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-mobiveil.h"
+
+/* LUT and PF control registers */
+#define PCIE_LUT_OFF 0x80000
+#define PCIE_PF_OFF 0xc0000
+#define PCIE_PF_INT_STAT 0x18
+#define PF_INT_STAT_PABRST BIT(31)
+
+#define PCIE_PF_DBG 0x7fc
+#define PF_DBG_LTSSM_MASK 0x3f
+#define PF_DBG_LTSSM_L0 0x2d /* L0 state */
+#define PF_DBG_WE BIT(31)
+#define PF_DBG_PABR BIT(27)
+
+#define to_ls_g4_pcie(x) platform_get_drvdata((x)->pdev)
+
+struct ls_g4_pcie {
+ struct mobiveil_pcie pci;
+ struct delayed_work dwork;
+ int irq;
+};
+
+static inline u32 ls_g4_pcie_pf_readl(struct ls_g4_pcie *pcie, u32 off)
+{
+ return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
+}
+
+static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie,
+ u32 off, u32 val)
+{
+ iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
+}
+
+static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
+{
+ struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci);
+ u32 state;
+
+ state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
+ state = state & PF_DBG_LTSSM_MASK;
+
+ if (state == PF_DBG_LTSSM_L0)
+ return 1;
+
+ return 0;
+}
+
+static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+
+ mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB);
+}
+
+static void ls_g4_pcie_enable_interrupt(struct ls_g4_pcie *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u32 val;
+
+ /* Clear the interrupt status */
+ mobiveil_csr_writel(mv_pci, 0xffffffff, PAB_INTP_AMBA_MISC_STAT);
+
+ val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
+ PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
+ mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
+}
+
+static int ls_g4_pcie_reinit_hw(struct ls_g4_pcie *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ struct device *dev = &mv_pci->pdev->dev;
+ u32 val, act_stat;
+ int to = 100;
+
+ /* Poll for pab_csb_reset to set and PAB activity to clear */
+ do {
+ usleep_range(10, 15);
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_INT_STAT);
+ act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT);
+ } while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
+ if (to < 0) {
+ dev_err(dev, "Poll PABRST&PABACT timeout\n");
+ return -EIO;
+ }
+
+ /* clear PEX_RESET bit in PEX_PF0_DBG register */
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
+ val |= PF_DBG_WE;
+ ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
+ val |= PF_DBG_PABR;
+ ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
+ val &= ~PF_DBG_WE;
+ ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ mobiveil_host_init(mv_pci, true);
+
+ to = 100;
+ while (!ls_g4_pcie_link_up(mv_pci) && to--)
+ usleep_range(200, 250);
+ if (to < 0) {
+ dev_err(dev, "PCIe link training timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ls_g4_pcie_isr(int irq, void *dev_id)
+{
+ struct ls_g4_pcie *pcie = (struct ls_g4_pcie *)dev_id;
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u32 val;
+
+ val = mobiveil_csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
+ if (!val)
+ return IRQ_NONE;
+
+ if (val & PAB_INTP_RESET) {
+ ls_g4_pcie_disable_interrupt(pcie);
+ schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
+ }
+
+ mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);
+
+ return IRQ_HANDLED;
+}
+
+static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci)
+{
+ struct ls_g4_pcie *pcie = to_ls_g4_pcie(mv_pci);
+ struct platform_device *pdev = mv_pci->pdev;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pcie->irq = platform_get_irq_byname(pdev, "intr");
+ if (pcie->irq < 0)
+ return pcie->irq;
+
+ ret = devm_request_irq(dev, pcie->irq, ls_g4_pcie_isr,
+ IRQF_SHARED, pdev->name, pcie);
+ if (ret) {
+ dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ls_g4_pcie_reset(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork);
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u16 ctrl;
+
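+ /*
+ * Hot reset recovery: clear the pending secondary bus reset on the
+ * bridge, re-initialize the controller and, on success, re-enable
+ * the interrupts that were masked in the ISR.
+ */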
+ ctrl = mobiveil_csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
+
+ if (ls_g4_pcie_reinit_hw(pcie))
+ return;
+
+ ls_g4_pcie_enable_interrupt(pcie);
+}
+
+static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
+ .interrupt_init = ls_g4_pcie_interrupt_init,
+};
+
+static const struct mobiveil_pab_ops ls_g4_pcie_pab_ops = {
+ .link_up = ls_g4_pcie_link_up,
+};
+
+static int __init ls_g4_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct mobiveil_pcie *mv_pci;
+ struct ls_g4_pcie *pcie;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!of_parse_phandle(np, "msi-parent", 0)) {
+ dev_err(dev, "Failed to find msi-parent\n");
+ return -EINVAL;
+ }
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ mv_pci = &pcie->pci;
+
+ mv_pci->pdev = pdev;
+ mv_pci->ops = &ls_g4_pcie_pab_ops;
+ mv_pci->rp.ops = &ls_g4_pcie_rp_ops;
+ mv_pci->rp.bridge = bridge;
+
+ platform_set_drvdata(pdev, pcie);
+
+ INIT_DELAYED_WORK(&pcie->dwork, ls_g4_pcie_reset);
+
+ ret = mobiveil_pcie_host_probe(mv_pci);
+ if (ret) {
+ dev_err(dev, "Fail to probe\n");
+ return ret;
+ }
+
+ ls_g4_pcie_enable_interrupt(pcie);
+
+ return 0;
+}
+
+static const struct of_device_id ls_g4_pcie_of_match[] = {
+ { .compatible = "fsl,lx2160a-pcie", },
+ { },
+};
+
+static struct platform_driver ls_g4_pcie_driver = {
+ .driver = {
+ .name = "layerscape-pcie-gen4",
+ .of_match_table = ls_g4_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver_probe(ls_g4_pcie_driver, ls_g4_pcie_probe);
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
new file mode 100644
index 000000000..45b97a4b1
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019-2020 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pcie-mobiveil.h"
+
+static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ /* Only one device down on each root port */
+ if (pci_is_root_bus(bus) && (devfn > 0))
+ return false;
+
+ /*
+ * Do not read more than one device on the bus directly
+ * attached to RC
+ */
+ if ((bus->primary == to_pci_host_bridge(bus->bridge)->busnr) && (PCI_SLOT(devfn) > 0))
+ return false;
+
+ return true;
+}
+
+/*
+ * mobiveil_pcie_map_bus - routine to get the configuration base of either
+ * root port or endpoint
+ */
+static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct mobiveil_pcie *pcie = bus->sysdata;
+ struct mobiveil_root_port *rp = &pcie->rp;
+ u32 value;
+
+ if (!mobiveil_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ /* RC config access */
+ if (pci_is_root_bus(bus))
+ return pcie->csr_axi_slave_base + where;
+
+ /*
+ * EP config access (in Config/APIO space)
+ * Program PEX Address base (31..16 bits) with appropriate value
+ * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
+ * Relies on pci_lock serialization
+ */
+ value = bus->number << PAB_BUS_SHIFT |
+ PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
+ PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
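+ /*
+ * Worked example (illustrative, not from the Mobiveil manual):
+ * bus 2, device 3, function 1 encodes as
+ * (2 << 24) | (3 << 19) | (1 << 16) = 0x02190000.
+ */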
+
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+
+ return rp->config_axi_slave_base + where;
+}
+
+static struct pci_ops mobiveil_pcie_ops = {
+ .map_bus = mobiveil_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void mobiveil_pcie_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct device *dev = &pcie->pdev->dev;
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct mobiveil_msi *msi = &rp->msi;
+ u32 msi_data, msi_addr_lo, msi_addr_hi;
+ u32 intr_status, msi_status;
+ unsigned long shifted_status;
+ u32 bit, val, mask;
+
+ /*
+ * The core provides a single interrupt line for both INTx and
+ * MSI messages, so read both the INTx and the MSI status.
+ */
+
+ chained_irq_enter(chip, desc);
+
+ /* read INTx status */
+ val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ intr_status = val & mask;
+
+ /* Handle INTx */
+ if (intr_status & PAB_INTP_INTX_MASK) {
+ shifted_status = mobiveil_csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
+ shifted_status &= PAB_INTP_INTX_MASK;
+ shifted_status >>= PAB_INTX_START;
+ do {
+ for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
+ int ret;
+ ret = generic_handle_domain_irq(rp->intx_domain,
+ bit + 1);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
+ bit);
+
+ /* clear interrupt handled */
+ mobiveil_csr_writel(pcie,
+ 1 << (PAB_INTX_START + bit),
+ PAB_INTP_AMBA_MISC_STAT);
+ }
+
+ shifted_status = mobiveil_csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
+ shifted_status &= PAB_INTP_INTX_MASK;
+ shifted_status >>= PAB_INTX_START;
+ } while (shifted_status != 0);
+ }
+
+ /* read extra MSI status register */
+ msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
+
+ /* handle MSI interrupts */
+ while (msi_status & 1) {
+ msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
+
+ /*
+ * The MSI_STATUS_OFFSET register is cleared only after
+ * both the MSI data and the MSI address have been popped
+ * from the hardware FIFO, hence the following two dummy
+ * address reads.
+ */
+ msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
+ MSI_ADDR_L_OFFSET);
+ msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
+ MSI_ADDR_H_OFFSET);
+ dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
+ msi_data, msi_addr_hi, msi_addr_lo);
+
+ generic_handle_domain_irq(msi->dev_domain, msi_data);
+
+ msi_status = readl_relaxed(pcie->apb_csr_base +
+ MSI_STATUS_OFFSET);
+ }
+
+ /* Clear the interrupt status */
+ mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+ chained_irq_exit(chip, desc);
+}
+
+static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct platform_device *pdev = pcie->pdev;
+ struct device_node *node = dev->of_node;
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct resource *res;
+
+ /* map config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "config_axi_slave");
+ rp->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(rp->config_axi_slave_base))
+ return PTR_ERR(rp->config_axi_slave_base);
+ rp->ob_io_res = res;
+
+ /* map csr resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "csr_axi_slave");
+ pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->csr_axi_slave_base))
+ return PTR_ERR(pcie->csr_axi_slave_base);
+ pcie->pcie_reg_base = res->start;
+
+ /* read the number of windows requested */
+ if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
+ pcie->apio_wins = MAX_PIO_WINDOWS;
+
+ if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
+ pcie->ppio_wins = MAX_PIO_WINDOWS;
+
+ return 0;
+}
+
+static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
+{
+ phys_addr_t msg_addr = pcie->pcie_reg_base;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
+
+ msi->num_of_vectors = PCI_NUM_MSI;
+ msi->msi_pages_phys = (phys_addr_t)msg_addr;
+
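+ /*
+ * A reading of this code (not of a Mobiveil datasheet): the MSI
+ * capture window is one 4 KB page at the controller's own CSR
+ * base, and inbound writes matching it are latched into the MSI
+ * FIFO rather than forwarded.
+ */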
+ writel_relaxed(lower_32_bits(msg_addr),
+ pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
+ writel_relaxed(upper_32_bits(msg_addr),
+ pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
+ writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
+ writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
+}
+
+int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
+{
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct pci_host_bridge *bridge = rp->bridge;
+ u32 value, pab_ctrl, type;
+ struct resource_entry *win;
+
+ pcie->ib_wins_configured = 0;
+ pcie->ob_wins_configured = 0;
+
+ if (!reinit) {
+ /* setup bus numbers: primary 0, secondary 1, subordinate 0xff */
+ value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
+ value &= 0xff000000;
+ value |= 0x00ff0100;
+ mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ }
+
+ /*
+ * program Bus Master Enable Bit in Command Register in PAB Config
+ * Space
+ */
+ value = mobiveil_csr_readl(pcie, PCI_COMMAND);
+ value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ mobiveil_csr_writel(pcie, value, PCI_COMMAND);
+
+ /*
+ * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
+ * register
+ */
+ pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
+ pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
+ mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
+
+ /*
+ * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
+ * PAB_AXI_PIO_CTRL Register
+ */
+ value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
+ value |= APIO_EN_MASK;
+ mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
+
+ /* Enable PCIe PIO master */
+ value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ value |= 1 << PIO_ENABLE_SHIFT;
+ mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
+
+ /*
+ * Program one outbound window for config reads and one default
+ * inbound window for all upstream traffic. The remaining
+ * outbound windows are configured according to the "ranges"
+ * property in the device tree.
+ */
+
+ /* config outbound translation window */
+ program_ob_windows(pcie, WIN_NUM_0, rp->ob_io_res->start, 0,
+ CFG_WINDOW_TYPE, resource_size(rp->ob_io_res));
+
+ /* memory inbound translation window */
+ program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry(win, &bridge->windows) {
+ if (resource_type(win->res) == IORESOURCE_MEM)
+ type = MEM_WINDOW_TYPE;
+ else if (resource_type(win->res) == IORESOURCE_IO)
+ type = IO_WINDOW_TYPE;
+ else
+ continue;
+
+ /* configure outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ win->res->start,
+ win->res->start - win->offset,
+ type, resource_size(win->res));
+ }
+
+ /* fixup for PCIe class register */
+ value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+ value &= 0xff;
+ value |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+
+ return 0;
+}
+
+static void mobiveil_mask_intx_irq(struct irq_data *data)
+{
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct mobiveil_root_port *rp;
+ unsigned long flags;
+ u32 mask, shifted_val;
+
+ rp = &pcie->rp;
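+ /*
+ * hwirq is 1-based (mobiveil_pcie_isr maps status bit n to
+ * hwirq n + 1), so e.g. INTA (hwirq 1) yields the mask
+ * 1 << (1 + PAB_INTX_START - 1) = BIT(5) = PAB_INTP_INTA.
+ */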
+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+ raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
+ shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ shifted_val &= ~mask;
+ mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+ raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
+}
+
+static void mobiveil_unmask_intx_irq(struct irq_data *data)
+{
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct mobiveil_root_port *rp;
+ unsigned long flags;
+ u32 shifted_val, mask;
+
+ rp = &pcie->rp;
+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+ raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
+ shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ shifted_val |= mask;
+ mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+ raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
+}
+
+static struct irq_chip intx_irq_chip = {
+ .name = "mobiveil_pcie:intx",
+ .irq_enable = mobiveil_unmask_intx_irq,
+ .irq_disable = mobiveil_mask_intx_irq,
+ .irq_mask = mobiveil_mask_intx_irq,
+ .irq_unmask = mobiveil_unmask_intx_irq,
+};
+
+/* routine to setup the INTx related data */
+static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+/* INTx domain operations structure */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mobiveil_pcie_intx_map,
+};
+
+static struct irq_chip mobiveil_msi_irq_chip = {
+ .name = "Mobiveil PCIe MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mobiveil_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &mobiveil_msi_irq_chip,
+};
+
+static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
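+ /*
+ * Illustrative example: hwirq 3 yields the doorbell address
+ * pcie_reg_base + 12 with MSI data 3, so every vector is
+ * distinguishable by both address and data.
+ */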
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip mobiveil_msi_bottom_irq_chip = {
+ .name = "Mobiveil MSI",
+ .irq_compose_msi_msg = mobiveil_compose_msi_msg,
+ .irq_set_affinity = mobiveil_msi_set_affinity,
+};
+
+static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct mobiveil_pcie *pcie = domain->host_data;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&msi->lock);
+
+ bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
+ if (bit >= msi->num_of_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->msi_irq_in_use);
+
+ mutex_unlock(&msi->lock);
+
+ irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
+ domain->host_data, handle_level_irq, NULL, NULL);
+ return 0;
+}
+
+static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct mobiveil_msi *msi = &pcie->rp.msi;
+
+ mutex_lock(&msi->lock);
+
+ if (!test_bit(d->hwirq, msi->msi_irq_in_use))
+ dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ else
+ __clear_bit(d->hwirq, msi->msi_irq_in_use);
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mobiveil_irq_msi_domain_alloc,
+ .free = mobiveil_irq_msi_domain_free,
+};
+
+static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct mobiveil_msi *msi = &pcie->rp.msi;
+
+ mutex_init(&msi->lock);
+ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, pcie);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &mobiveil_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct mobiveil_root_port *rp = &pcie->rp;
+
+ /* setup INTx */
+ rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
+
+ if (!rp->intx_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ raw_spin_lock_init(&rp->intx_mask_lock);
+
+ /* setup MSI */
+ return mobiveil_allocate_msi_domains(pcie);
+}
+
+static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie)
+{
+ struct platform_device *pdev = pcie->pdev;
+ struct device *dev = &pdev->dev;
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct resource *res;
+ int ret;
+
+ /* map MSI config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
+ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->apb_csr_base))
+ return PTR_ERR(pcie->apb_csr_base);
+
+ /* setup MSI hardware registers */
+ mobiveil_pcie_enable_msi(pcie);
+
+ rp->irq = platform_get_irq(pdev, 0);
+ if (rp->irq < 0)
+ return rp->irq;
+
+ /* initialize the IRQ domains */
+ ret = mobiveil_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return ret;
+ }
+
+ irq_set_chained_handler_and_data(rp->irq, mobiveil_pcie_isr, pcie);
+
+ /* Enable interrupts */
+ mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
+
+ return 0;
+}
+
+static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
+{
+ struct mobiveil_root_port *rp = &pcie->rp;
+
+ if (rp->ops->interrupt_init)
+ return rp->ops->interrupt_init(pcie);
+
+ return mobiveil_pcie_integrated_interrupt_init(pcie);
+}
+
+static bool mobiveil_pcie_is_bridge(struct mobiveil_pcie *pcie)
+{
+ u32 header_type;
+
+ header_type = mobiveil_csr_readb(pcie, PCI_HEADER_TYPE);
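+ /* bit 7 of the header type is the multi-function flag; mask it off */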
+ header_type &= 0x7f;
+
+ return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
+{
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct pci_host_bridge *bridge = rp->bridge;
+ struct device *dev = &pcie->pdev->dev;
+ int ret;
+
+ ret = mobiveil_pcie_parse_dt(pcie);
+ if (ret) {
+ dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
+ return ret;
+ }
+
+ if (!mobiveil_pcie_is_bridge(pcie))
+ return -ENODEV;
+
+ /*
+ * configure all inbound and outbound windows and prepare the RC for
+ * config access
+ */
+ ret = mobiveil_host_init(pcie, false);
+ if (ret) {
+ dev_err(dev, "Failed to initialize host\n");
+ return ret;
+ }
+
+ ret = mobiveil_pcie_interrupt_init(pcie);
+ if (ret) {
+ dev_err(dev, "Interrupt init failed\n");
+ return ret;
+ }
+
+ /* Initialize bridge */
+ bridge->sysdata = pcie;
+ bridge->ops = &mobiveil_pcie_ops;
+
+ ret = mobiveil_bringup_link(pcie);
+ if (ret) {
+ dev_info(dev, "link bring-up failed\n");
+ return ret;
+ }
+
+ return pci_host_probe(bridge);
+}
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
new file mode 100644
index 000000000..c5bb87ff6
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pcie-mobiveil.h"
+
+static int mobiveil_pcie_probe(struct platform_device *pdev)
+{
+ struct mobiveil_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ struct device *dev = &pdev->dev;
+
+ /* allocate the PCIe port */
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->rp.bridge = bridge;
+
+ pcie->pdev = pdev;
+
+ return mobiveil_pcie_host_probe(pcie);
+}
+
+static const struct of_device_id mobiveil_pcie_of_match[] = {
+ {.compatible = "mbvl,gpex40-pcie",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
+
+static struct platform_driver mobiveil_pcie_driver = {
+ .probe = mobiveil_pcie_probe,
+ .driver = {
+ .name = "mobiveil-pcie",
+ .of_match_table = mobiveil_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mobiveil_pcie_driver);
+
+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.c b/drivers/pci/controller/mobiveil/pcie-mobiveil.c
new file mode 100644
index 000000000..62ecbaeb0
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-mobiveil.h"
+
+/*
+ * mobiveil_pcie_sel_page - select the page for paged register access
+ *
+ * Registers whose offset is greater than PAGED_ADDR_BNDRY (0xc00) are
+ * paged: the upper 6 bits of the offset are written to the pg_sel field
+ * of the PAB_CTRL register, and the lower 10 bits, ORed with
+ * PAGED_ADDR_BNDRY, are used as the register offset within the page.
+ */
+static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
+{
+ u32 val;
+
+ val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
+ val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
+ val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
+
+ writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
+}
+
+static void __iomem *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie,
+ u32 off)
+{
+ if (off < PAGED_ADDR_BNDRY) {
+ /* For directly accessed registers, clear the pg_sel field */
+ mobiveil_pcie_sel_page(pcie, 0);
+ return pcie->csr_axi_slave_base + off;
+ }
+
+ mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
+ return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
+}
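+
+/*
+ * Worked example (illustrative): offset 0x4ba0 (PAB_PEX_AMAP_CTRL(0))
+ * lies above PAGED_ADDR_BNDRY, so pg_sel becomes
+ * (0x4ba0 >> 10) & 0x3f = 0x12 and the access goes through
+ * csr_axi_slave_base + ((0x4ba0 & 0x3ff) | 0xc00) = base + 0xfa0.
+ */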
+
+static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
+{
+ if ((uintptr_t)addr & (size - 1)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ switch (size) {
+ case 4:
+ *val = readl(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ case 1:
+ *val = readb(addr);
+ break;
+ default:
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
+{
+ if ((uintptr_t)addr & (size - 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ switch (size) {
+ case 4:
+ writel(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ case 1:
+ writeb(val, addr);
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+{
+ void __iomem *addr;
+ u32 val;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_read(addr, size, &val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "read CSR address failed\n");
+
+ return val;
+}
+
+void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+ size_t size)
+{
+ void __iomem *addr;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_write(addr, size, val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "write CSR address failed\n");
+}
+
+bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
+{
+ if (pcie->ops->link_up)
+ return pcie->ops->link_up(pcie);
+
+ return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
+ LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
+}
+
+void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
+{
+ u32 value;
+ u64 size64 = ~(size - 1);
+
+ if (win_num >= pcie->ppio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max inbound windows reached !\n");
+ return;
+ }
+
+ value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
+
+ mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
+ PAB_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
+
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ib_wins_configured++;
+}
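+
+/*
+ * Size encoding note (inferred from this code, not from the manual):
+ * both window routines store ~(size - 1), so e.g. a 1 MB window
+ * becomes 0xfffffffffff00000; masking the low word with WIN_SIZE_MASK
+ * assumes a power-of-two size of at least 1 KB.
+ */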
+
+/*
+ * routine to program the outbound windows
+ */
+void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
+{
+ u32 value;
+ u64 size64 = ~(size - 1);
+
+ if (win_num >= pcie->apio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max outbound windows reached !\n");
+ return;
+ }
+
+ /*
+ * program the window enable bit, the window type and the encoded
+ * window size in the PAB_AXI_AMAP_CTRL register
+ */
+ value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
+
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
+
+ /*
+ * program the AXI window base with the appropriate value in the
+ * PAB_AXI_AMAP_AXI_WIN register of this window
+ */
+ mobiveil_csr_writel(pcie,
+ lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
+
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ob_wins_configured++;
+}
+
+int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
+{
+ int retries;
+
+ /* poll for link up; 10 retries of ~100 ms gives roughly 1 s in total */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (mobiveil_pcie_link_up(pcie))
+ return 0;
+
+ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+ }
+
+ dev_err(&pcie->pdev->dev, "link never came up\n");
+
+ return -ETIMEDOUT;
+}
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
new file mode 100644
index 000000000..6082b8afb
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+ */
+
+#ifndef _PCIE_MOBIVEIL_H
+#define _PCIE_MOBIVEIL_H
+
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include "../../pci.h"
+
+/* register offsets and bit positions */
+
+/*
+ * translation tables are grouped into windows, each window registers are
+ * grouped into blocks of 4 or 16 registers each
+ */
+#define PAB_REG_BLOCK_SIZE 16
+#define PAB_EXT_REG_BLOCK_SIZE 4
+
+#define PAB_REG_ADDR(offset, win) \
+ (offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) \
+ (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
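+
+/*
+ * Example (illustrative): PAB_AXI_AMAP_CTRL(2) resolves to
+ * 0x0ba0 + 2 * 16 = 0x0bc0, while extended registers use the 4-byte
+ * stride, e.g. PAB_EXT_AXI_AMAP_SIZE(2) = 0xbaf0 + 2 * 4 = 0xbaf8.
+ */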
+
+#define LTSSM_STATUS 0x0404
+#define LTSSM_STATUS_L0_MASK 0x3f
+#define LTSSM_STATUS_L0 0x2d
+
+#define PAB_CTRL 0x0808
+#define AMBA_PIO_ENABLE_SHIFT 0
+#define PEX_PIO_ENABLE_SHIFT 1
+#define PAGE_SEL_SHIFT 13
+#define PAGE_SEL_MASK 0x3f
+#define PAGE_LO_MASK 0x3ff
+#define PAGE_SEL_OFFSET_SHIFT 10
+
+#define PAB_ACTIVITY_STAT 0x81c
+
+#define PAB_AXI_PIO_CTRL 0x0840
+#define APIO_EN_MASK 0xf
+
+#define PAB_PEX_PIO_CTRL 0x08c0
+#define PIO_ENABLE_SHIFT 0
+
+#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
+#define PAB_INTP_RESET BIT(1)
+#define PAB_INTP_MSI BIT(3)
+#define PAB_INTP_INTA BIT(5)
+#define PAB_INTP_INTB BIT(6)
+#define PAB_INTP_INTC BIT(7)
+#define PAB_INTP_INTD BIT(8)
+#define PAB_INTP_PCIE_UE BIT(9)
+#define PAB_INTP_IE_PMREDI BIT(29)
+#define PAB_INTP_IE_EC BIT(30)
+#define PAB_INTP_MSI_MASK PAB_INTP_MSI
+#define PAB_INTP_INTX_MASK (PAB_INTP_INTA | PAB_INTP_INTB |\
+ PAB_INTP_INTC | PAB_INTP_INTD)
+
+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
+#define WIN_ENABLE_SHIFT 0
+#define WIN_TYPE_SHIFT 1
+#define WIN_TYPE_MASK 0x3
+#define WIN_SIZE_MASK 0xfffffc00
+
+#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
+
+#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
+#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
+#define AXI_WINDOW_ALIGN_MASK 3
+
+#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
+
+#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
+#define PAB_INTP_AXI_PIO_CLASS 0x474
+
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+#define AMAP_CTRL_TYPE_MASK 3
+
+#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
+#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
+#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
+#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
+#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
+
+/* starting offset of INTX bits in status register */
+#define PAB_INTX_START 5
+
+/* supported number of MSI interrupts */
+#define PCI_NUM_MSI 16
+
+/* MSI registers */
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
+
+/* outbound and inbound window definitions */
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+#define PAGED_ADDR_BNDRY 0xc00
+#define OFFSET_TO_PAGE_ADDR(off) \
+ ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
+#define OFFSET_TO_PAGE_IDX(off) \
+ ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
+
+struct mobiveil_msi { /* MSI information */
+ struct mutex lock; /* protect bitmap variable */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ phys_addr_t msi_pages_phys;
+ int num_of_vectors;
+ DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
+};
+
+struct mobiveil_pcie;
+
+struct mobiveil_rp_ops {
+ int (*interrupt_init)(struct mobiveil_pcie *pcie);
+};
+
+struct mobiveil_root_port {
+ void __iomem *config_axi_slave_base; /* endpoint config base */
+ struct resource *ob_io_res;
+ struct mobiveil_rp_ops *ops;
+ int irq;
+ raw_spinlock_t intx_mask_lock;
+ struct irq_domain *intx_domain;
+ struct mobiveil_msi msi;
+ struct pci_host_bridge *bridge;
+};
+
+struct mobiveil_pab_ops {
+ int (*link_up)(struct mobiveil_pcie *pcie);
+};
+
+struct mobiveil_pcie {
+ struct platform_device *pdev;
+ void __iomem *csr_axi_slave_base; /* root port config base */
+ void __iomem *apb_csr_base; /* MSI register base */
+ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
+ int apio_wins;
+ int ppio_wins;
+ int ob_wins_configured; /* configured outbound windows */
+ int ib_wins_configured; /* configured inbound windows */
+ const struct mobiveil_pab_ops *ops;
+ struct mobiveil_root_port rp;
+};
+
+int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie);
+int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit);
+bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie);
+int mobiveil_bringup_link(struct mobiveil_pcie *pcie);
+void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
+ u64 pci_addr, u32 type, u64 size);
+void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
+ u64 pci_addr, u32 type, u64 size);
+u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size);
+void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+ size_t size);
+
+static inline u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
+{
+ return mobiveil_csr_read(pcie, off, 0x4);
+}
+
+static inline u16 mobiveil_csr_readw(struct mobiveil_pcie *pcie, u32 off)
+{
+ return mobiveil_csr_read(pcie, off, 0x2);
+}
+
+static inline u8 mobiveil_csr_readb(struct mobiveil_pcie *pcie, u32 off)
+{
+ return mobiveil_csr_read(pcie, off, 0x1);
+}
+
+static inline void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val,
+ u32 off)
+{
+ mobiveil_csr_write(pcie, val, off, 0x4);
+}
+
+static inline void mobiveil_csr_writew(struct mobiveil_pcie *pcie, u16 val,
+ u32 off)
+{
+ mobiveil_csr_write(pcie, val, off, 0x2);
+}
+
+static inline void mobiveil_csr_writeb(struct mobiveil_pcie *pcie, u8 val,
+ u32 off)
+{
+ mobiveil_csr_write(pcie, val, off, 0x1);
+}
+
+#endif /* _PCIE_MOBIVEIL_H */
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
new file mode 100644
index 000000000..71ecd7ddc
--- /dev/null
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -0,0 +1,2011 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Aardvark PCIe controller, used on Marvell Armada
+ * 3700.
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/init.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+
+#include "../pci.h"
+#include "../pci-bridge-emul.h"
+
+/* PCIe core registers */
+#define PCIE_CORE_DEV_ID_REG 0x0
+#define PCIE_CORE_CMD_STATUS_REG 0x4
+#define PCIE_CORE_DEV_REV_REG 0x8
+#define PCIE_CORE_SSDEV_ID_REG 0x2c
+#define PCIE_CORE_PCIEXP_CAP 0xc0
+#define PCIE_CORE_PCIERR_CAP 0x100
+#define PCIE_CORE_ERR_CAPCTL_REG 0x118
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
+/* PIO registers base address and register offsets */
+#define PIO_BASE_ADDR 0x4000
+#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
+#define PIO_CTRL_TYPE_MASK GENMASK(3, 0)
+#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24)
+#define PIO_STAT (PIO_BASE_ADDR + 0x4)
+#define PIO_COMPLETION_STATUS_SHIFT 7
+#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
+#define PIO_COMPLETION_STATUS_OK 0
+#define PIO_COMPLETION_STATUS_UR 1
+#define PIO_COMPLETION_STATUS_CRS 2
+#define PIO_COMPLETION_STATUS_CA 4
+#define PIO_NON_POSTED_REQ BIT(10)
+#define PIO_ERR_STATUS BIT(11)
+#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
+#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
+#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
+#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14)
+#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18)
+#define PIO_START (PIO_BASE_ADDR + 0x1c)
+#define PIO_ISR (PIO_BASE_ADDR + 0x20)
+#define PIO_ISRM (PIO_BASE_ADDR + 0x24)
+
+/* Aardvark Control registers */
+#define CONTROL_BASE_ADDR 0x4800
+#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0)
+#define PCIE_GEN_SEL_MSK 0x3
+#define PCIE_GEN_SEL_SHIFT 0x0
+#define SPEED_GEN_1 0
+#define SPEED_GEN_2 1
+#define SPEED_GEN_3 2
+#define IS_RC_MSK 1
+#define IS_RC_SHIFT 2
+#define LANE_CNT_MSK 0x18
+#define LANE_CNT_SHIFT 0x3
+#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT)
+#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT)
+#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT)
+#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT)
+#define LINK_TRAINING_EN BIT(6)
+#define LEGACY_INTA BIT(28)
+#define LEGACY_INTB BIT(29)
+#define LEGACY_INTC BIT(30)
+#define LEGACY_INTD BIT(31)
+#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4)
+#define HOT_RESET_GEN BIT(0)
+#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8)
+#define PCIE_CORE_CTRL2_RESERVED 0x7
+#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4)
+#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
+#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
+#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
+#define PCIE_CORE_REF_CLK_REG (CONTROL_BASE_ADDR + 0x14)
+#define PCIE_CORE_REF_CLK_TX_ENABLE BIT(1)
+#define PCIE_CORE_REF_CLK_RX_ENABLE BIT(2)
+#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
+#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
+#define PCIE_MSG_PM_PME_MASK BIT(7)
+#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
+#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
+#define PCIE_ISR0_CORR_ERR BIT(11)
+#define PCIE_ISR0_NFAT_ERR BIT(12)
+#define PCIE_ISR0_FAT_ERR BIT(13)
+#define PCIE_ISR0_ERR_MASK GENMASK(13, 11)
+#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
+#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
+#define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
+#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48)
+#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
+#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
+#define PCIE_ISR1_FLUSH BIT(5)
+#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
+#define PCIE_ISR1_ALL_MASK GENMASK(31, 0)
+#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
+#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
+#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
+#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
+#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
+#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
+#define PCIE_MSI_DATA_MASK GENMASK(15, 0)
+
+/* PCIe window configuration */
+#define OB_WIN_BASE_ADDR 0x4c00
+#define OB_WIN_BLOCK_SIZE 0x20
+#define OB_WIN_COUNT 8
+#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
+ OB_WIN_BLOCK_SIZE * (win) + \
+ (offset))
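+/* e.g. OB_WIN_ACTIONS(1) = 0x4c00 + 0x20 + 0x18 = 0x4c38 (illustrative) */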
+#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
+#define OB_WIN_ENABLE BIT(0)
+#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
+#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
+#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
+#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
+#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
+#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
+#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
+#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
+#define OB_WIN_FUNC_NUM_SHIFT 24
+#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
+#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
+#define OB_WIN_BUS_NUM_BITS_SHIFT 20
+#define OB_WIN_MSG_CODE_ENABLE BIT(22)
+#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
+#define OB_WIN_MSG_CODE_SHIFT 14
+#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
+#define OB_WIN_ATTR_ENABLE BIT(11)
+#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
+#define OB_WIN_ATTR_TC_SHIFT 8
+#define OB_WIN_ATTR_RELAXED BIT(7)
+#define OB_WIN_ATTR_NOSNOOP BIT(6)
+#define OB_WIN_ATTR_POISON BIT(5)
+#define OB_WIN_ATTR_IDO BIT(4)
+#define OB_WIN_TYPE_MASK GENMASK(3, 0)
+#define OB_WIN_TYPE_SHIFT 0
+#define OB_WIN_TYPE_MEM 0x0
+#define OB_WIN_TYPE_IO 0x4
+#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
+#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
+#define OB_WIN_TYPE_MSG 0xc
+
+/* LMI registers base address and register offsets */
+#define LMI_BASE_ADDR 0x6000
+#define CFG_REG (LMI_BASE_ADDR + 0x0)
+#define LTSSM_SHIFT 24
+#define LTSSM_MASK 0x3f
+#define RC_BAR_CONFIG 0x300
+
+/* LTSSM values in CFG_REG */
+enum {
+ LTSSM_DETECT_QUIET = 0x0,
+ LTSSM_DETECT_ACTIVE = 0x1,
+ LTSSM_POLLING_ACTIVE = 0x2,
+ LTSSM_POLLING_COMPLIANCE = 0x3,
+ LTSSM_POLLING_CONFIGURATION = 0x4,
+ LTSSM_CONFIG_LINKWIDTH_START = 0x5,
+ LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6,
+ LTSSM_CONFIG_LANENUM_ACCEPT = 0x7,
+ LTSSM_CONFIG_LANENUM_WAIT = 0x8,
+ LTSSM_CONFIG_COMPLETE = 0x9,
+ LTSSM_CONFIG_IDLE = 0xa,
+ LTSSM_RECOVERY_RCVR_LOCK = 0xb,
+ LTSSM_RECOVERY_SPEED = 0xc,
+ LTSSM_RECOVERY_RCVR_CFG = 0xd,
+ LTSSM_RECOVERY_IDLE = 0xe,
+ LTSSM_L0 = 0x10,
+ LTSSM_RX_L0S_ENTRY = 0x11,
+ LTSSM_RX_L0S_IDLE = 0x12,
+ LTSSM_RX_L0S_FTS = 0x13,
+ LTSSM_TX_L0S_ENTRY = 0x14,
+ LTSSM_TX_L0S_IDLE = 0x15,
+ LTSSM_TX_L0S_FTS = 0x16,
+ LTSSM_L1_ENTRY = 0x17,
+ LTSSM_L1_IDLE = 0x18,
+ LTSSM_L2_IDLE = 0x19,
+ LTSSM_L2_TRANSMIT_WAKE = 0x1a,
+ LTSSM_DISABLED = 0x20,
+ LTSSM_LOOPBACK_ENTRY_MASTER = 0x21,
+ LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22,
+ LTSSM_LOOPBACK_EXIT_MASTER = 0x23,
+ LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24,
+ LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25,
+ LTSSM_LOOPBACK_EXIT_SLAVE = 0x26,
+ LTSSM_HOT_RESET = 0x27,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
+};
+
+#define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44)
+
+/* PCIe core controller registers */
+#define CTRL_CORE_BASE_ADDR 0x18000
+#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
+#define CTRL_MODE_SHIFT 0x0
+#define CTRL_MODE_MASK 0x1
+#define PCIE_CORE_MODE_DIRECT 0x0
+#define PCIE_CORE_MODE_COMMAND 0x1
+
+/* PCIe Central Interrupts Registers */
+#define CENTRAL_INT_BASE_ADDR 0x1b000
+#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0)
+#define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4)
+#define PCIE_IRQ_CMDQ_INT BIT(0)
+#define PCIE_IRQ_MSI_STATUS_INT BIT(1)
+#define PCIE_IRQ_CMD_SENT_DONE BIT(3)
+#define PCIE_IRQ_DMA_INT BIT(4)
+#define PCIE_IRQ_IB_DXFERDONE BIT(5)
+#define PCIE_IRQ_OB_DXFERDONE BIT(6)
+#define PCIE_IRQ_OB_RXFERDONE BIT(7)
+#define PCIE_IRQ_COMPQ_INT BIT(12)
+#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13)
+#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14)
+#define PCIE_IRQ_CORE_INT BIT(16)
+#define PCIE_IRQ_CORE_INT_PIO BIT(17)
+#define PCIE_IRQ_DPMU_INT BIT(18)
+#define PCIE_IRQ_PCIE_MIS_INT BIT(19)
+#define PCIE_IRQ_MSI_INT1_DET BIT(20)
+#define PCIE_IRQ_MSI_INT2_DET BIT(21)
+#define PCIE_IRQ_RC_DBELL_DET BIT(22)
+#define PCIE_IRQ_EP_STATUS BIT(23)
+#define PCIE_IRQ_ALL_MASK GENMASK(31, 0)
+#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT
+
+/* Transaction types */
+#define PCIE_CONFIG_RD_TYPE0 0x8
+#define PCIE_CONFIG_RD_TYPE1 0x9
+#define PCIE_CONFIG_WR_TYPE0 0xa
+#define PCIE_CONFIG_WR_TYPE1 0xb
+
+#define PIO_RETRY_CNT 750000 /* 1.5 s */
+#define PIO_RETRY_DELAY 2 /* 2 us */
+
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+#define RETRAIN_WAIT_MAX_RETRIES 10
+#define RETRAIN_WAIT_USLEEP_US 2000
+
+#define MSI_IRQ_NUM 32
+
+#define CFG_RD_CRS_VAL 0xffff0001
+
+struct advk_pcie {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct {
+ phys_addr_t match;
+ phys_addr_t remap;
+ phys_addr_t mask;
+ u32 actions;
+ } wins[OB_WIN_COUNT];
+ u8 wins_count;
+ struct irq_domain *rp_irq_domain;
+ struct irq_domain *irq_domain;
+ struct irq_chip irq_chip;
+ raw_spinlock_t irq_lock;
+ struct irq_domain *msi_domain;
+ struct irq_domain *msi_inner_domain;
+ raw_spinlock_t msi_irq_lock;
+ DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
+ struct mutex msi_used_lock;
+ int link_gen;
+ struct pci_bridge_emul bridge;
+ struct gpio_desc *reset_gpio;
+ struct phy *phy;
+};
+
+static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
+{
+ writel(val, pcie->base + reg);
+}
+
+static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
+{
+ return readl(pcie->base + reg);
+}
+
+static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
+{
+ u32 val;
+ u8 ltssm_state;
+
+ val = advk_readl(pcie, CFG_REG);
+ ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
+ return ltssm_state;
+}
+
+static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
+{
+ /* check if LTSSM is in normal operation - some L* state */
+ u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+ return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
+}
+
+static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
+{
+ /*
+ * According to PCIe Base specification 3.0, Table 4-14: Link
+ * Status Mapped to the LTSSM, and 4.2.6.3.6 Configuration.Idle,
+ * Link Up is mapped to the LTSSM Configuration.Idle, Recovery,
+ * L0, L0s, L1 and L2 states. And according to 3.2.1. Data Link
+ * Control and Management State Machine Rules, DL Up status is
+ * reported in the DL Active state.
+ */
+ u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+ return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
+}
+
+static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
+{
+ /*
+ * According to PCIe Base specification 3.0, Table 4-14: Link
+ * Status Mapped to the LTSSM is Link Training mapped to LTSSM
+ * Configuration and Recovery states.
+ */
+ u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+ return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
+ ltssm_state < LTSSM_L0) ||
+ (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
+ ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
+}
+
+static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
+{
+ int retries;
+
+ /* check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (advk_pcie_link_up(pcie))
+ return 0;
+
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
+{
+ size_t retries;
+
+ for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
+ if (advk_pcie_link_training(pcie))
+ break;
+ udelay(RETRAIN_WAIT_USLEEP_US);
+ }
+}
+
+static void advk_pcie_issue_perst(struct advk_pcie *pcie)
+{
+ if (!pcie->reset_gpio)
+ return;
+
+ /* 10ms delay is needed for some cards */
+ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+}
+
+static void advk_pcie_train_link(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ u32 reg;
+ int ret;
+
+ /*
+ * Setup PCIe rev / gen compliance based on device tree property
+ * 'max-link-speed' which also forces maximal link speed.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~PCIE_GEN_SEL_MSK;
+ if (pcie->link_gen == 3)
+ reg |= SPEED_GEN_3;
+ else if (pcie->link_gen == 2)
+ reg |= SPEED_GEN_2;
+ else
+ reg |= SPEED_GEN_1;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /*
+ * Set maximal link speed value also into PCIe Link Control 2 register.
+ * Armada 3700 Functional Specification says that default value is based
+ * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
+ reg &= ~PCI_EXP_LNKCTL2_TLS;
+ if (pcie->link_gen == 3)
+ reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
+ else if (pcie->link_gen == 2)
+ reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+ else
+ reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
+
+ /* Enable link training after selecting PCIe generation */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg |= LINK_TRAINING_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /*
+ * Reset PCIe card via PERST# signal. Some cards are not detected
+ * during link training when they are in some non-initial state.
+ */
+ advk_pcie_issue_perst(pcie);
+
+ /*
+ * The PERST# signal could have been asserted by the pinctrl subsystem
+ * before the probe() callback was called, or issued explicitly by the
+ * reset gpio function advk_pcie_issue_perst(), putting the endpoint
+ * into fundamental reset. As required by the PCI Express spec (PCI
+ * Express Base Specification, REV. 4.0, February 19 2014, 6.6.1
+ * Conventional Reset), a delay of at least 100ms is needed after such
+ * a reset before sending a Configuration Request to the device. So
+ * wait until the PCIe link is up; advk_pcie_wait_for_link() waits for
+ * the link for at least 900ms.
+ */
+ ret = advk_pcie_wait_for_link(pcie);
+ if (ret < 0)
+ dev_err(dev, "link never came up\n");
+ else
+ dev_info(dev, "link up\n");
+}
+
+/*
+ * Set a PCIe outbound address window, which can be used for memory
+ * mapping.
+ */
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
+ phys_addr_t match, phys_addr_t remap,
+ phys_addr_t mask, u32 actions)
+{
+ advk_writel(pcie, OB_WIN_ENABLE |
+ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
+}
+
+static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
+{
+ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
+}
+
+static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+{
+ phys_addr_t msi_addr;
+ u32 reg;
+ int i;
+
+ /*
+ * Configure PCIe Reference clock. Direction is from the PCIe
+ * controller to the endpoint card, so enable transmitting of
+ * Reference clock differential signal off-chip and disable
+ * receiving off-chip differential signal.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
+ reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
+ reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);
+
+ /* Set to Direct mode */
+ reg = advk_readl(pcie, CTRL_CONFIG_REG);
+ reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
+ reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
+ advk_writel(pcie, reg, CTRL_CONFIG_REG);
+
+ /* Set PCI global control register to RC mode */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg |= (IS_RC_MSK << IS_RC_SHIFT);
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /*
+ * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
+ * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
+ * id in high 16 bits. Updating this register changes readback value of
+ * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
+ * for erratum 4.1: "The value of device and vendor ID is incorrect".
+ */
+ reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
+ advk_writel(pcie, reg, VENDOR_ID_REG);
+
+ /*
+ * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
+ * because the default value is Mass storage controller (0x010400).
+ *
+ * Note that this Aardvark PCI Bridge does not have compliant Type 1
+ * Configuration Space and it even cannot be accessed via Aardvark's
+ * PCI config space access method. Something like config space is
+ * available in internal Aardvark registers starting at offset 0x0
+ * and is reported as Type 0. In range 0x10 - 0x34 it has totally
+ * different registers.
+ *
+ * Therefore driver uses emulation of PCI Bridge which emulates
+ * access to configuration space via internal Aardvark registers or
+ * emulated configuration buffer.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
+ reg &= ~0xffffff00;
+ reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
+
+ /* Disable Root Bridge I/O space, memory space and bus mastering */
+ reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+ reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
+
+ /* Set Advanced Error Capabilities and Control PF0 register */
+ reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
+ PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
+ PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
+ PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
+ advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
+
+ /* Set PCIe Device Control register */
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
+ reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+ reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ reg &= ~PCI_EXP_DEVCTL_READRQ;
+ reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
+ reg |= PCI_EXP_DEVCTL_READRQ_512B;
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
+
+ /* Program PCIe Control 2 to disable strict ordering */
+ reg = PCIE_CORE_CTRL2_RESERVED |
+ PCIE_CORE_CTRL2_TD_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+ /* Set lane X1 */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~LANE_CNT_MSK;
+ reg |= LANE_COUNT_1;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* Set MSI address */
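+ /*
+ * Inference from this code (not from the Armada 3700 spec): the
+ * controller matches inbound writes against this address to raise
+ * the MSI interrupt, so any otherwise unused physical address
+ * works; the driver data's own address serves as a dummy target.
+ */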
+ msi_addr = virt_to_phys(pcie);
+ advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
+ advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);
+
+ /* Enable MSI */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+ reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+ /* Clear all interrupts */
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+
+ /* Disable All ISR0/1 and MSI Sources */
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+
+ /* Unmask summary MSI interrupt */
+ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ reg &= ~PCIE_ISR0_MSI_INT_PENDING;
+ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
+
+ /* Unmask PME interrupt for processing of PME requester */
+ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ reg &= ~PCIE_MSG_PM_PME_MASK;
+ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
+
+ /* Enable summary interrupt for GIC SPI source */
+ reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
+ advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
+
+ /*
+ * Enable AXI address window location generation:
+ * when enabled, the default outbound window
+ * configuration (Default User Field: 0xD0074CFC)
+ * is used for transparent address translation of
+ * outbound transactions. Thus, PCIe address
+ * windows are not required for transparent memory
+ * access when the default outbound window
+ * configuration is set up for memory access.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+ reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+ /*
+ * Set memory access in Default User Field so it
+ * is not required to configure PCIe address for
+ * transparent memory access.
+ */
+ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
+
+ /*
+ * Bypass the address window mapping for PIO:
+ * Since PIO access already contains all required
+ * info over AXI interface by PIO registers, the
+ * address window is not required.
+ */
+ reg = advk_readl(pcie, PIO_CTRL);
+ reg |= PIO_CTRL_ADDR_WIN_DISABLE;
+ advk_writel(pcie, reg, PIO_CTRL);
+
+ /*
+ * Configure PCIe address windows for non-memory or
+ * non-transparent access as by default PCIe uses
+ * transparent memory access.
+ */
+ for (i = 0; i < pcie->wins_count; i++)
+ advk_pcie_set_ob_win(pcie, i,
+ pcie->wins[i].match, pcie->wins[i].remap,
+ pcie->wins[i].mask, pcie->wins[i].actions);
+
+ /* Disable remaining PCIe outbound windows */
+ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
+ advk_pcie_disable_ob_win(pcie, i);
+
+ advk_pcie_train_link(pcie);
+}
+
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
+{
+ struct device *dev = &pcie->pdev->dev;
+ u32 reg;
+ unsigned int status;
+ char *strcomp_status, *str_posted;
+ int ret;
+
+ reg = advk_readl(pcie, PIO_STAT);
+ status = (reg & PIO_COMPLETION_STATUS_MASK) >>
+ PIO_COMPLETION_STATUS_SHIFT;
+
+ /*
+ * According to the HW spec, the PIO status check sequence is as
+ * follows:
+ * 1) even if COMPLETION_STATUS (bits 9:7) indicates success, the
+ * Error Status bit (bit 11) must still be checked; the operation
+ * succeeded only if that bit indicates no error.
+ * 2) the Unsupported Request (1) value of COMPLETION_STATUS
+ * (bits 9:7) only means a PIO write error; a PIO read is successful
+ * with a read value of 0xFFFFFFFF.
+ * 3) the Completion Retry Status (CRS) value of COMPLETION_STATUS
+ * (bits 9:7) only means a PIO write error; a PIO read is successful
+ * with a read value of 0xFFFF0001.
+ * 4) the Completer Abort (CA) value of COMPLETION_STATUS (bits 9:7)
+ * means an error for both PIO read and PIO write operations.
+ * 5) other errors are reported as 'unknown'.
+ */
+ switch (status) {
+ case PIO_COMPLETION_STATUS_OK:
+ if (reg & PIO_ERR_STATUS) {
+ strcomp_status = "COMP_ERR";
+ ret = -EFAULT;
+ break;
+ }
+ /* Get the read result */
+ if (val)
+ *val = advk_readl(pcie, PIO_RD_DATA);
+ /* No error */
+ strcomp_status = NULL;
+ ret = 0;
+ break;
+ case PIO_COMPLETION_STATUS_UR:
+ strcomp_status = "UR";
+ ret = -EOPNOTSUPP;
+ break;
+ case PIO_COMPLETION_STATUS_CRS:
+ if (allow_crs && val) {
+ /* PCIe r4.0, sec 2.3.2, says:
+ * If CRS Software Visibility is enabled:
+ * For a Configuration Read Request that includes both
+ * bytes of the Vendor ID field of a device Function's
+ * Configuration Space Header, the Root Complex must
+ * complete the Request to the host by returning a
+ * read-data value of 0001h for the Vendor ID field and
+ * all '1's for any additional bytes included in the
+ * request.
+ *
+ * So CRS in this case is not an error status.
+ */
+ *val = CFG_RD_CRS_VAL;
+ strcomp_status = NULL;
+ ret = 0;
+ break;
+ }
+ /* PCIe r4.0, sec 2.3.2, says:
+ * If CRS Software Visibility is not enabled, the Root Complex
+ * must re-issue the Configuration Request as a new Request.
+ * If CRS Software Visibility is enabled: For a Configuration
+ * Write Request or for any other Configuration Read Request,
+ * the Root Complex must re-issue the Configuration Request as
+ * a new Request.
+ * A Root Complex implementation may choose to limit the number
+ * of Configuration Request/CRS Completion Status loops before
+ * determining that something is wrong with the target of the
+ * Request and taking appropriate action, e.g., complete the
+ * Request to the host as a failed transaction.
+ *
+		 * So return -EAGAIN and the caller (pci-aardvark.c) will
+		 * re-issue the request up to PIO_RETRY_CNT times.
+ */
+ strcomp_status = "CRS";
+ ret = -EAGAIN;
+ break;
+ case PIO_COMPLETION_STATUS_CA:
+ strcomp_status = "CA";
+ ret = -ECANCELED;
+ break;
+ default:
+ strcomp_status = "Unknown";
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!strcomp_status)
+ return ret;
+
+ if (reg & PIO_NON_POSTED_REQ)
+ str_posted = "Non-posted";
+ else
+ str_posted = "Posted";
+
+ dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+ str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
+
+ return ret;
+}
+
+static int advk_pcie_wait_pio(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ int i;
+
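+	/*
+	 * Poll for completion: the transfer is finished once the PIO_START
+	 * bit has self-cleared and PIO_ISR signals an event. The number of
+	 * polling iterations used is returned so that callers can count it
+	 * against their overall PIO_RETRY_CNT budget.
+	 */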
+ for (i = 1; i <= PIO_RETRY_CNT; i++) {
+ u32 start, isr;
+
+ start = advk_readl(pcie, PIO_START);
+ isr = advk_readl(pcie, PIO_ISR);
+ if (!start && isr)
+ return i;
+ udelay(PIO_RETRY_DELAY);
+ }
+
+	dev_err(dev, "PIO read/write transfer timed out\n");
+ return -ETIMEDOUT;
+}
+
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_INTERRUPT_LINE: {
+ /*
+		 * Of the whole 32-bit register, only two bits are read from
+		 * HW: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
+		 * All other bits are taken from the emulated config buffer.
+ */
+ __le32 *cfgspace = (__le32 *)&bridge->conf;
+ u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+ if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
+ val &= ~(PCI_BRIDGE_CTL_SERR << 16);
+ else
+ val |= PCI_BRIDGE_CTL_SERR << 16;
+ if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
+ val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
+ else
+ val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
+ *value = val;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+}
+
+static void
+advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
+ break;
+
+ case PCI_INTERRUPT_LINE:
+ /*
+		 * According to Figure 6-3 (Pseudo Logic Diagram for Error
+		 * Message Controls) in the PCIe base specification, the SERR#
+		 * Enable bit in the Bridge Control register enables reception
+		 * of ERR_* messages.
+ */
+ if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ if (new & (PCI_BRIDGE_CTL_SERR << 16))
+ val &= ~PCIE_ISR0_ERR_MASK;
+ else
+ val |= PCIE_ISR0_ERR_MASK;
+ advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+ }
+ if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
+ u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
+ if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
+ val |= HOT_RESET_GEN;
+ else
+ val &= ~HOT_RESET_GEN;
+ advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+
+ switch (reg) {
+ /*
+ * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
+ * also supported, but do not need to be handled here, because their
+ * values are stored in emulated config space buffer, and we read them
+ * from there when needed.
+ */
+
+ case PCI_EXP_LNKCAP: {
+ u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+ /*
+		 * The PCI_EXP_LNKCAP_DLLLARC bit is hardwired to 0 in aardvark
+		 * HW, but support for PCI_EXP_LNKSTA_DLLLA is emulated via the
+		 * LTSSM state, so explicitly set the PCI_EXP_LNKCAP_DLLLARC
+		 * flag.
+ */
+ val |= PCI_EXP_LNKCAP_DLLLARC;
+ *value = val;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+ case PCI_EXP_LNKCTL: {
+ /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
+ u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
+ ~(PCI_EXP_LNKSTA_LT << 16);
+ if (advk_pcie_link_training(pcie))
+ val |= (PCI_EXP_LNKSTA_LT << 16);
+ if (advk_pcie_link_active(pcie))
+ val |= (PCI_EXP_LNKSTA_DLLLA << 16);
+ *value = val;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVCAP2:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCAP2:
+ case PCI_EXP_LNKCTL2:
+ *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+
+}
+
+static void
+advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_EXP_LNKCTL:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ if (new & PCI_EXP_LNKCTL_RL)
+ advk_pcie_wait_for_retrain(pcie);
+ break;
+
+ case PCI_EXP_RTCTL: {
+ u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
+ /* Only emulation of PMEIE and CRSSVE bits is provided */
+ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+ bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
+ break;
+ }
+
+ /*
+ * PCI_EXP_RTSTA is also supported, but does not need to be handled
+ * here, because its value is stored in emulated config space buffer,
+ * and we write it there when needed.
+ */
+
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCTL2:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case 0:
+ *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
+
+ /*
+		 * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but the
+		 * Armada 3700 Functional Specification does not document
+		 * registers at those addresses.
+ *
+ * Thus we clear PCI_EXT_CAP_NEXT bits to make Advanced Error
+ * Reporting Capability header the last Extended Capability.
+ * If we obtain documentation for those registers in the
+ * future, this can be changed.
+ */
+ *value &= 0x000fffff;
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG + 0:
+ case PCI_ERR_HEADER_LOG + 4:
+ case PCI_ERR_HEADER_LOG + 8:
+ case PCI_ERR_HEADER_LOG + 12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_STATUS:
+ case PCI_ERR_ROOT_ERR_SRC:
+ *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+}
+
+static void
+advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ /* These are W1C registers, so clear other bits */
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_ROOT_STATUS:
+ new &= mask;
+ fallthrough;
+
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG + 0:
+ case PCI_ERR_HEADER_LOG + 4:
+ case PCI_ERR_HEADER_LOG + 8:
+ case PCI_ERR_HEADER_LOG + 12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_ERR_SRC:
+ advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+ .read_base = advk_pci_bridge_emul_base_conf_read,
+ .write_base = advk_pci_bridge_emul_base_conf_write,
+ .read_pcie = advk_pci_bridge_emul_pcie_conf_read,
+ .write_pcie = advk_pci_bridge_emul_pcie_conf_write,
+ .read_ext = advk_pci_bridge_emul_ext_conf_read,
+ .write_ext = advk_pci_bridge_emul_ext_conf_write,
+};
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+{
+ struct pci_bridge_emul *bridge = &pcie->bridge;
+
+ bridge->conf.vendor =
+ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
+ bridge->conf.device =
+ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
+ bridge->conf.class_revision =
+ cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);
+
+ /* Support 32 bits I/O addressing */
+ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+ bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
+
+ /* Support 64 bits memory pref */
+ bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
+ bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
+
+ /* Support interrupt A for MSI feature */
+ bridge->conf.intpin = PCI_INTERRUPT_INTA;
+
+ /*
+	 * Aardvark HW provides a PCIe Capability structure in version 2 and
+	 * indicates slot support, which is emulated.
+ */
+ bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);
+
+ /*
+ * Set Presence Detect State bit permanently since there is no support
+ * for unplugging the card nor detecting whether it is plugged. (If a
+ * platform exists in the future that supports it, via a GPIO for
+ * example, it should be implemented via this bit.)
+ *
+ * Set physical slot number to 1 since there is only one port and zero
+ * value is reserved for ports within the same silicon as Root Port
+ * which is not our case.
+ */
+ bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
+ 1));
+ bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
+
+	/* Indicate support for Completion Retry Status */
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+
+ bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
+ bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
+ bridge->has_pcie = true;
+ bridge->pcie_start = PCIE_CORE_PCIEXP_CAP;
+ bridge->data = pcie;
+ bridge->ops = &advk_pci_bridge_emul_ops;
+
+ return pci_bridge_emul_init(bridge, 0);
+}
+
+static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+ int devfn)
+{
+ if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
+ return false;
+
+ /*
+ * If the link goes down after we check for link-up, we have a problem:
+ * if a PIO request is executed while link-down, the whole controller
+ * gets stuck in a non-functional state, and even after link comes up
+ * again, PIO requests won't work anymore, and a reset of the whole PCIe
+ * controller is needed. Therefore we need to prevent sending PIO
+ * requests while the link is down.
+ */
+ if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
+ return false;
+
+ return true;
+}
+
+static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+
+ /*
+	 * Trying to start a new PIO transfer while the previous one has not
+	 * completed causes an External Abort on the CPU, which results in a
+	 * kernel panic:
+ *
+ * SError Interrupt on CPU0, code 0xbf000002 -- SError
+ * Kernel panic - not syncing: Asynchronous SError Interrupt
+ *
+ * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
+ * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
+	 * concurrent calls at the same time. But because a PIO transfer may
+	 * take about 1.5 s when the link is down or the card is disconnected,
+	 * advk_pcie_wait_pio() may time out before the transfer completes,
+	 * which is why this check is needed.
+ *
+	 * Some versions of ARM Trusted Firmware handle this External Abort at
+	 * EL3 level and mask it to prevent a kernel panic. Relevant TF-A commit:
+ * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
+ */
+ if (advk_readl(pcie, PIO_START)) {
+ dev_err(dev, "Previous PIO read/write transfer is still running\n");
+ return true;
+ }
+
+ return false;
+}
+
+static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ struct advk_pcie *pcie = bus->sysdata;
+ int retry_count;
+ bool allow_crs;
+ u32 reg;
+ int ret;
+
+ if (!advk_pcie_valid_device(pcie, bus, devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (pci_is_root_bus(bus))
+ return pci_bridge_emul_conf_read(&pcie->bridge, where,
+ size, val);
+
+ /*
+	 * Completion Retry Status can be returned only when all 4 bytes of
+	 * the PCI_VENDOR_ID and PCI_DEVICE_ID registers are read at once and
+	 * the CRSSVE flag on the Root Bridge is enabled.
+ */
+ allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+ (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
+ PCI_EXP_RTCTL_CRSSVE);
+
+ if (advk_pcie_pio_is_running(pcie))
+ goto try_crs;
+
+ /* Program the control register */
+ reg = advk_readl(pcie, PIO_CTRL);
+ reg &= ~PIO_CTRL_TYPE_MASK;
+ if (pci_is_root_bus(bus->parent))
+ reg |= PCIE_CONFIG_RD_TYPE0;
+ else
+ reg |= PCIE_CONFIG_RD_TYPE1;
+ advk_writel(pcie, reg, PIO_CTRL);
+
+	/*
+	 * Program the address registers. PIO addresses are dword aligned;
+	 * sub-dword values are extracted from the 32-bit result below.
+	 */
+ reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
+ advk_writel(pcie, reg, PIO_ADDR_LS);
+ advk_writel(pcie, 0, PIO_ADDR_MS);
+
+ /* Program the data strobe */
+ advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
+
+ retry_count = 0;
+ do {
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
+ advk_writel(pcie, 1, PIO_START);
+
+ ret = advk_pcie_wait_pio(pcie);
+ if (ret < 0)
+ goto try_crs;
+
+ retry_count += ret;
+
+ /* Check PIO status and get the read result */
+ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
+
+ if (ret < 0)
+ goto fail;
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+ return PCIBIOS_SUCCESSFUL;
+
+try_crs:
+ /*
+ * If it is possible, return Completion Retry Status so that caller
+ * tries to issue the request again instead of failing.
+ */
+ if (allow_crs) {
+ *val = CFG_RD_CRS_VAL;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+fail:
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+}
+
+static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct advk_pcie *pcie = bus->sysdata;
+ u32 reg;
+ u32 data_strobe = 0x0;
+ int retry_count;
+ int offset;
+ int ret;
+
+ if (!advk_pcie_valid_device(pcie, bus, devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (pci_is_root_bus(bus))
+ return pci_bridge_emul_conf_write(&pcie->bridge, where,
+ size, val);
+
+ if (where % size)
+ return PCIBIOS_SET_FAILED;
+
+ if (advk_pcie_pio_is_running(pcie))
+ return PCIBIOS_SET_FAILED;
+
+ /* Program the control register */
+ reg = advk_readl(pcie, PIO_CTRL);
+ reg &= ~PIO_CTRL_TYPE_MASK;
+ if (pci_is_root_bus(bus->parent))
+ reg |= PCIE_CONFIG_WR_TYPE0;
+ else
+ reg |= PCIE_CONFIG_WR_TYPE1;
+ advk_writel(pcie, reg, PIO_CTRL);
+
+ /* Program the address registers */
+ reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
+ advk_writel(pcie, reg, PIO_ADDR_LS);
+ advk_writel(pcie, 0, PIO_ADDR_MS);
+
+	/*
+	 * Calculate the write strobe: one byte-enable bit per byte of the
+	 * access, shifted to the byte lane selected by the register offset.
+	 */
+ offset = where & 0x3;
+ reg = val << (8 * offset);
+ data_strobe = GENMASK(size - 1, 0) << offset;
+
+ /* Program the data register */
+ advk_writel(pcie, reg, PIO_WR_DATA);
+
+ /* Program the data strobe */
+ advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
+
+ retry_count = 0;
+ do {
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
+ advk_writel(pcie, 1, PIO_START);
+
+ ret = advk_pcie_wait_pio(pcie);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
+
+ retry_count += ret;
+
+ ret = advk_pcie_check_pio_status(pcie, false, NULL);
+ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
+
+ return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops advk_pcie_ops = {
+ .read = advk_pcie_rd_conf,
+ .write = advk_pcie_wr_conf,
+};
+
+static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t msi_addr = virt_to_phys(pcie);
+
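+	/*
+	 * The doorbell address is the physical address of this driver's
+	 * private data; it only needs to match the address programmed into
+	 * PCIE_MSI_ADDR_LOW/HIGH_REG during controller setup, it is never
+	 * accessed as regular memory.
+	 */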
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
+}
+
+static int advk_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
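+	/*
+	 * All MSIs are delivered via the single shared controller interrupt,
+	 * so per-vector affinity cannot be programmed into the hardware.
+	 */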
+ return -EINVAL;
+}
+
+static void advk_msi_irq_mask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
+ mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+ mask |= BIT(hwirq);
+ advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
+}
+
+static void advk_msi_irq_unmask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
+ mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+ mask &= ~BIT(hwirq);
+ advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
+}
+
+static void advk_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void advk_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip advk_msi_bottom_irq_chip = {
+ .name = "MSI",
+ .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
+ .irq_set_affinity = advk_msi_set_affinity,
+ .irq_mask = advk_msi_irq_mask,
+ .irq_unmask = advk_msi_irq_unmask,
+};
+
+static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct advk_pcie *pcie = domain->host_data;
+ int hwirq, i;
+
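+	/*
+	 * Multi-MSI requires a naturally aligned, power-of-two block of
+	 * vectors, so allocate a whole bitmap region of the matching order.
+	 */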
+ mutex_lock(&pcie->msi_used_lock);
+ hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
+ order_base_2(nr_irqs));
+ mutex_unlock(&pcie->msi_used_lock);
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &advk_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+
+ return 0;
+}
+
+static void advk_msi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct advk_pcie *pcie = domain->host_data;
+
+ mutex_lock(&pcie->msi_used_lock);
+ bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
+ mutex_unlock(&pcie->msi_used_lock);
+}
+
+static const struct irq_domain_ops advk_msi_domain_ops = {
+ .alloc = advk_msi_irq_domain_alloc,
+ .free = advk_msi_irq_domain_free,
+};
+
+static void advk_pcie_irq_mask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+ mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+ mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
+ advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
+}
+
+static void advk_pcie_irq_unmask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+ mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+ mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
+ advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
+}
+
+static int advk_pcie_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hwirq)
+{
+ struct advk_pcie *pcie = h->host_data;
+
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &pcie->irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(virq, pcie);
+
+ return 0;
+}
+
+static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
+ .map = advk_pcie_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static struct irq_chip advk_msi_irq_chip = {
+ .name = "advk-MSI",
+ .irq_mask = advk_msi_top_irq_mask,
+ .irq_unmask = advk_msi_top_irq_unmask,
+};
+
+static struct msi_domain_info advk_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ .chip = &advk_msi_irq_chip,
+};
+
+static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+
+ raw_spin_lock_init(&pcie->msi_irq_lock);
+ mutex_init(&pcie->msi_used_lock);
+
+ pcie->msi_inner_domain =
+ irq_domain_add_linear(NULL, MSI_IRQ_NUM,
+ &advk_msi_domain_ops, pcie);
+ if (!pcie->msi_inner_domain)
+ return -ENOMEM;
+
+ pcie->msi_domain =
+ pci_msi_create_irq_domain(dev_fwnode(dev),
+ &advk_msi_domain_info,
+ pcie->msi_inner_domain);
+ if (!pcie->msi_domain) {
+ irq_domain_remove(pcie->msi_inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
+{
+ irq_domain_remove(pcie->msi_domain);
+ irq_domain_remove(pcie->msi_inner_domain);
+}
+
+static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ struct irq_chip *irq_chip;
+ int ret = 0;
+
+ raw_spin_lock_init(&pcie->irq_lock);
+
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ irq_chip = &pcie->irq_chip;
+
+ irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
+ dev_name(dev));
+ if (!irq_chip->name) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ irq_chip->irq_mask = advk_pcie_irq_mask;
+ irq_chip->irq_unmask = advk_pcie_irq_unmask;
+
+ pcie->irq_domain =
+ irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &advk_pcie_irq_domain_ops, pcie);
+ if (!pcie->irq_domain) {
+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+out_put_node:
+ of_node_put(pcie_intc_node);
+ return ret;
+}
+
+static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
+{
+ irq_domain_remove(pcie->irq_domain);
+}
+
+static struct irq_chip advk_rp_irq_chip = {
+ .name = "advk-RP",
+};
+
+static int advk_pcie_rp_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hwirq)
+{
+ struct advk_pcie *pcie = h->host_data;
+
+ irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
+ irq_set_chip_data(virq, pcie);
+
+ return 0;
+}
+
+static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
+ .map = advk_pcie_rp_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
+{
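+	/*
+	 * The emulated Root Port exposes a single hwirq (0), which carries
+	 * both PME and AER events; see advk_pcie_handle_pme() and
+	 * advk_pcie_handle_int().
+	 */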
+ pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
+ &advk_pcie_rp_irq_domain_ops,
+ pcie);
+ if (!pcie->rp_irq_domain) {
+ dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
+{
+ irq_domain_remove(pcie->rp_irq_domain);
+}
+
+static void advk_pcie_handle_pme(struct advk_pcie *pcie)
+{
+ u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;
+
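+	/*
+	 * Ack the PME message interrupt right away so that a PME arriving
+	 * while this one is being processed re-triggers the handler.
+	 */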
+ advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);
+
+ /*
+	 * PCIE_MSG_LOG_REG contains the last inbound message, so store the
+	 * requester ID only when PME has not been asserted yet. Also do not
+	 * trigger the PME interrupt while PME is still asserted.
+ */
+ if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
+ pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);
+
+ /*
+ * Trigger PME interrupt only if PMEIE bit in Root Control is set.
+ * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
+ */
+ if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
+ return;
+
+ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
+ }
+}
+
+static void advk_pcie_handle_msi(struct advk_pcie *pcie)
+{
+ u32 msi_val, msi_mask, msi_status, msi_idx;
+
+ msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+ msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
+ msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);
+
+ for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
+ if (!(BIT(msi_idx) & msi_status))
+ continue;
+
+ advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
+ if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
+ }
+
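+	/* Ack the summary MSI bit only after all vectors have been handled */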
+ advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
+ PCIE_ISR0_REG);
+}
+
+static void advk_pcie_handle_int(struct advk_pcie *pcie)
+{
+ u32 isr0_val, isr0_mask, isr0_status;
+ u32 isr1_val, isr1_mask, isr1_status;
+ int i;
+
+ isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
+ isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
+
+ isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
+ isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+ isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
+
+	/* Process the PME interrupt first so the PME requester ID is not missed */
+ if (isr0_status & PCIE_MSG_PM_PME_MASK)
+ advk_pcie_handle_pme(pcie);
+
+ /* Process ERR interrupt */
+ if (isr0_status & PCIE_ISR0_ERR_MASK) {
+ advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);
+
+ /*
+ * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
+ * PCIe interrupt 0
+ */
+ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
+ }
+
+ /* Process MSI interrupts */
+ if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
+ advk_pcie_handle_msi(pcie);
+
+ /* Process legacy interrupts */
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
+ continue;
+
+ advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
+ PCIE_ISR1_REG);
+
+ if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
+ (char)i + 'A');
+ }
+}
+
+static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
+{
+ struct advk_pcie *pcie = arg;
+ u32 status;
+
+ status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+ if (!(status & PCIE_IRQ_CORE_INT))
+ return IRQ_NONE;
+
+ advk_pcie_handle_int(pcie);
+
+ /* Clear interrupt */
+ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+
+ return IRQ_HANDLED;
+}
+
+static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct advk_pcie *pcie = dev->bus->sysdata;
+
+ /*
+ * Emulated root bridge has its own emulated irq chip and irq domain.
+ * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and
+ * hwirq for irq_create_mapping() is indexed from zero.
+ */
+ if (pci_is_root_bus(dev->bus))
+ return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
+ else
+ return of_irq_parse_and_map_pci(dev, slot, pin);
+}
+
+static void advk_pcie_disable_phy(struct advk_pcie *pcie)
+{
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+}
+
+static int advk_pcie_enable_phy(struct advk_pcie *pcie)
+{
+ int ret;
+
+ if (!pcie->phy)
+ return 0;
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
+ if (ret) {
+ phy_exit(pcie->phy);
+ return ret;
+ }
+
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ phy_exit(pcie->phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int advk_pcie_setup_phy(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret = 0;
+
+ pcie->phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
+ return PTR_ERR(pcie->phy);
+
+ /* Old bindings miss the PHY handle */
+ if (IS_ERR(pcie->phy)) {
+ dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
+ pcie->phy = NULL;
+ return 0;
+ }
+
+ ret = advk_pcie_enable_phy(pcie);
+ if (ret)
+ dev_err(dev, "Failed to initialize PHY (%d)\n", ret);
+
+ return ret;
+}
+
+static int advk_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct advk_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
+ int ret, irq;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
+
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ resource_size_t start = entry->res->start;
+ resource_size_t size = resource_size(entry->res);
+ unsigned long type = resource_type(entry->res);
+ u64 win_size;
+
+ /*
+		 * Aardvark hardware also allows configuring a PCIe window
+		 * for config type 0 and type 1 mapping, but the driver uses
+		 * only PIO for issuing configuration transfers, which does
+		 * not require any PCIe window configuration.
+ */
+ if (type != IORESOURCE_MEM && type != IORESOURCE_IO)
+ continue;
+
+ /*
+ * Skip transparent memory resources. Default outbound access
+ * configuration is set to transparent memory access so it
+ * does not need window configuration.
+ */
+ if (type == IORESOURCE_MEM && entry->offset == 0)
+ continue;
+
+ /*
+		 * The n-th PCIe window is configured by the tuple (match,
+		 * remap, mask): an access to address A uses this window if A
+		 * matches 'match' under the given mask. So every PCIe window
+		 * size must be a power of two and every start address must be
+		 * aligned to the window size. The minimal size is 64 KiB
+		 * because the lower 16 bits of the mask must be zero. The
+		 * remapped address may only have bits from the mask set.
+ */
+ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
+			/*
+			 * Calculate the largest power-of-two window size that
+			 * both fits in the remaining size and respects the
+			 * alignment of the current start address.
+			 */
+ win_size = (1ULL << (fls64(size)-1)) |
+ (start ? (1ULL << __ffs64(start)) : 0);
+ win_size = 1ULL << __ffs64(win_size);
+ if (win_size < 0x10000)
+ break;
+
+ dev_dbg(dev,
+ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
+ pcie->wins_count, (unsigned long long)start,
+ (unsigned long long)start + win_size, type);
+
+ if (type == IORESOURCE_IO) {
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
+ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
+ } else {
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
+ pcie->wins[pcie->wins_count].match = start;
+ }
+ pcie->wins[pcie->wins_count].remap = start - entry->offset;
+ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
+
+ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
+ break;
+
+ start += win_size;
+ size -= win_size;
+ pcie->wins_count++;
+ }
+
+ if (size > 0) {
+ dev_err(&pcie->pdev->dev,
+ "Invalid PCIe region [0x%llx-0x%llx]\n",
+ (unsigned long long)entry->res->start,
+ (unsigned long long)entry->res->end + 1);
+ return -EINVAL;
+ }
+ }
+
+ pcie->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
+ pcie);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
+
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset-gpio: %i\n", ret);
+ return ret;
+ }
+
+ ret = gpiod_set_consumer_name(pcie->reset_gpio, "pcie1-reset");
+ if (ret) {
+ dev_err(dev, "Failed to set reset gpio name: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_pci_get_max_link_speed(dev->of_node);
+ if (ret <= 0 || ret > 3)
+ pcie->link_gen = 3;
+ else
+ pcie->link_gen = ret;
+
+ ret = advk_pcie_setup_phy(pcie);
+ if (ret)
+ return ret;
+
+ advk_pcie_setup_hw(pcie);
+
+ ret = advk_sw_pci_bridge_init(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to register emulated root PCI bridge\n");
+ return ret;
+ }
+
+ ret = advk_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to initialize irq\n");
+ return ret;
+ }
+
+ ret = advk_pcie_init_msi_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to initialize irq\n");
+ advk_pcie_remove_irq_domain(pcie);
+ return ret;
+ }
+
+ ret = advk_pcie_init_rp_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to initialize irq\n");
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+ return ret;
+ }
+
+ bridge->sysdata = pcie;
+ bridge->ops = &advk_pcie_ops;
+ bridge->map_irq = advk_pcie_map_irq;
+
+ ret = pci_host_probe(bridge);
+ if (ret < 0) {
+ advk_pcie_remove_rp_irq_domain(pcie);
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void advk_pcie_remove(struct platform_device *pdev)
+{
+ struct advk_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u32 val;
+ int i;
+
+ /* Remove PCI bus with all devices */
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ pci_unlock_rescan_remove();
+
+ /* Disable Root Bridge I/O space, memory space and bus mastering */
+ val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+ val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG);
+
+ /* Disable MSI */
+ val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+ val &= ~PCIE_CORE_CTRL2_MSI_ENABLE;
+ advk_writel(pcie, val, PCIE_CORE_CTRL2_REG);
+
+ /* Clear MSI address */
+ advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG);
+ advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG);
+
+ /* Mask all interrupts */
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG);
+
+ /* Clear all interrupts */
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+
+ /* Remove IRQ domains */
+ advk_pcie_remove_rp_irq_domain(pcie);
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+
+ /* Free config space for emulated root bridge */
+ pci_bridge_emul_cleanup(&pcie->bridge);
+
+ /* Assert PERST# signal which prepares PCIe card for power down */
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+
+ /* Disable link training */
+ val = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ val &= ~LINK_TRAINING_EN;
+ advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);
+
+ /* Disable outbound address windows mapping */
+ for (i = 0; i < OB_WIN_COUNT; i++)
+ advk_pcie_disable_ob_win(pcie, i);
+
+ /* Disable phy */
+ advk_pcie_disable_phy(pcie);
+}
+
+static const struct of_device_id advk_pcie_of_match_table[] = {
+ { .compatible = "marvell,armada-3700-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);
+
+static struct platform_driver advk_pcie_driver = {
+ .driver = {
+ .name = "advk-pcie",
+ .of_match_table = advk_pcie_of_match_table,
+ },
+ .probe = advk_pcie_probe,
+ .remove_new = advk_pcie_remove,
+};
+module_platform_driver(advk_pcie_driver);
+
+MODULE_DESCRIPTION("Aardvark PCIe controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
new file mode 100644
index 000000000..ffdeed25e
--- /dev/null
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Faraday Technology FTPCI100 PCI Controller
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the out-of-tree OpenWRT patch for Cortina Gemini:
+ * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
+ * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Based on SL2312 PCI controller code
+ * Storlink (C) 2003
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+
+#include "../pci.h"
+
+/*
+ * Special configuration registers directly in the first few words
+ * in I/O space.
+ */
+#define FTPCI_IOSIZE 0x00
+#define FTPCI_PROT 0x04 /* AHB protection */
+#define FTPCI_CTRL 0x08 /* PCI control signal */
+#define FTPCI_SOFTRST 0x10 /* Soft reset counter and response error enable */
+#define FTPCI_CONFIG 0x28 /* PCI configuration command register */
+#define FTPCI_DATA 0x2C
+
+#define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */
+#define FARADAY_PCI_PMC 0x40 /* Power management control */
+#define FARADAY_PCI_PMCSR 0x44 /* Power management status */
+#define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */
+#define FARADAY_PCI_CTRL2 0x4C /* Control register 2 */
+#define FARADAY_PCI_MEM1_BASE_SIZE 0x50 /* Memory base and size #1 */
+#define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */
+#define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */
+
+#define PCI_STATUS_66MHZ_CAPABLE BIT(21)
+
+/* Bits 31..28 gives INTD..INTA status */
+#define PCI_CTRL2_INTSTS_SHIFT 28
+#define PCI_CTRL2_INTMASK_CMDERR BIT(27)
+#define PCI_CTRL2_INTMASK_PARERR BIT(26)
+/* Bits 25..22 masks INTD..INTA */
+#define PCI_CTRL2_INTMASK_SHIFT 22
+#define PCI_CTRL2_INTMASK_MABRT_RX BIT(21)
+#define PCI_CTRL2_INTMASK_TABRT_RX BIT(20)
+#define PCI_CTRL2_INTMASK_TABRT_TX BIT(19)
+#define PCI_CTRL2_INTMASK_RETRY4 BIT(18)
+#define PCI_CTRL2_INTMASK_SERR_RX BIT(17)
+#define PCI_CTRL2_INTMASK_PERR_RX BIT(16)
+/* Bit 15 reserved */
+#define PCI_CTRL2_MSTPRI_REQ6 BIT(14)
+#define PCI_CTRL2_MSTPRI_REQ5 BIT(13)
+#define PCI_CTRL2_MSTPRI_REQ4 BIT(12)
+#define PCI_CTRL2_MSTPRI_REQ3 BIT(11)
+#define PCI_CTRL2_MSTPRI_REQ2 BIT(10)
+#define PCI_CTRL2_MSTPRI_REQ1 BIT(9)
+#define PCI_CTRL2_MSTPRI_REQ0 BIT(8)
+/* Bits 7..4 reserved */
+/* Bits 3..0 TRDYW */
+
+/*
+ * Memory configs:
+ * Bit 31..20 defines the PCI side memory base
+ * Bit 19..16 (4 bits) defines the size per below
+ */
+#define FARADAY_PCI_MEMBASE_MASK 0xfff00000
+#define FARADAY_PCI_MEMSIZE_1MB 0x0
+#define FARADAY_PCI_MEMSIZE_2MB 0x1
+#define FARADAY_PCI_MEMSIZE_4MB 0x2
+#define FARADAY_PCI_MEMSIZE_8MB 0x3
+#define FARADAY_PCI_MEMSIZE_16MB 0x4
+#define FARADAY_PCI_MEMSIZE_32MB 0x5
+#define FARADAY_PCI_MEMSIZE_64MB 0x6
+#define FARADAY_PCI_MEMSIZE_128MB 0x7
+#define FARADAY_PCI_MEMSIZE_256MB 0x8
+#define FARADAY_PCI_MEMSIZE_512MB 0x9
+#define FARADAY_PCI_MEMSIZE_1GB 0xa
+#define FARADAY_PCI_MEMSIZE_2GB 0xb
+#define FARADAY_PCI_MEMSIZE_SHIFT 16
+
+/*
+ * The DMA base is set to 0x0 for all memory segments, it reflects the
+ * fact that the memory of the host system starts at 0x0.
+ */
+#define FARADAY_PCI_DMA_MEM1_BASE 0x00000000
+#define FARADAY_PCI_DMA_MEM2_BASE 0x00000000
+#define FARADAY_PCI_DMA_MEM3_BASE 0x00000000
+
+/**
+ * struct faraday_pci_variant - encodes IP block differences
+ * @cascaded_irq: this host has cascaded IRQs from an interrupt controller
+ * embedded in the host bridge.
+ */
+struct faraday_pci_variant {
+ bool cascaded_irq;
+};
+
+struct faraday_pci {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *irqdomain;
+ struct pci_bus *bus;
+ struct clk *bus_clk;
+};
+
+static int faraday_res_to_memcfg(resource_size_t mem_base,
+ resource_size_t mem_size, u32 *val)
+{
+ u32 outval;
+
+ switch (mem_size) {
+ case SZ_1M:
+ outval = FARADAY_PCI_MEMSIZE_1MB;
+ break;
+ case SZ_2M:
+ outval = FARADAY_PCI_MEMSIZE_2MB;
+ break;
+ case SZ_4M:
+ outval = FARADAY_PCI_MEMSIZE_4MB;
+ break;
+ case SZ_8M:
+ outval = FARADAY_PCI_MEMSIZE_8MB;
+ break;
+ case SZ_16M:
+ outval = FARADAY_PCI_MEMSIZE_16MB;
+ break;
+ case SZ_32M:
+ outval = FARADAY_PCI_MEMSIZE_32MB;
+ break;
+ case SZ_64M:
+ outval = FARADAY_PCI_MEMSIZE_64MB;
+ break;
+ case SZ_128M:
+ outval = FARADAY_PCI_MEMSIZE_128MB;
+ break;
+ case SZ_256M:
+ outval = FARADAY_PCI_MEMSIZE_256MB;
+ break;
+ case SZ_512M:
+ outval = FARADAY_PCI_MEMSIZE_512MB;
+ break;
+ case SZ_1G:
+ outval = FARADAY_PCI_MEMSIZE_1GB;
+ break;
+ case SZ_2G:
+ outval = FARADAY_PCI_MEMSIZE_2GB;
+ break;
+ default:
+ return -EINVAL;
+ }
+ outval <<= FARADAY_PCI_MEMSIZE_SHIFT;
+
+	/* Bits below the membase mask would be silently truncated, so warn */
+ if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK))
+ pr_warn("truncated PCI memory base\n");
+ /* Translate to bridge side address space */
+ outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK);
+ pr_debug("Translated pci base @%pap, size %pap to config %08x\n",
+ &mem_base, &mem_size, outval);
+
+ *val = outval;
+ return 0;
+}
+
+static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
+ unsigned int fn, int config, int size,
+ u32 *value)
+{
+ writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn),
+ PCI_FUNC(fn), config),
+ p->base + FTPCI_CONFIG);
+
+ *value = readl(p->base + FTPCI_DATA);
+
+ if (size == 1)
+ *value = (*value >> (8 * (config & 3))) & 0xFF;
+ else if (size == 2)
+ *value = (*value >> (8 * (config & 3))) & 0xFFFF;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
+				   int config, int size, u32 *value)
+{
+	struct faraday_pci *p = bus->sysdata;
+	int ret;
+
+	ret = faraday_raw_pci_read_config(p, bus->number, fn, config, size,
+					  value);
+
+	/* Log the value only after it has actually been read */
+	dev_dbg(&bus->dev,
+		"[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+		PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
+
+	return ret;
+}
+
+static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
+ unsigned int fn, int config, int size,
+ u32 value)
+{
+ int ret = PCIBIOS_SUCCESSFUL;
+
+ writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn),
+ PCI_FUNC(fn), config),
+ p->base + FTPCI_CONFIG);
+
+ switch (size) {
+ case 4:
+ writel(value, p->base + FTPCI_DATA);
+ break;
+ case 2:
+ writew(value, p->base + FTPCI_DATA + (config & 3));
+ break;
+ case 1:
+ writeb(value, p->base + FTPCI_DATA + (config & 3));
+ break;
+ default:
+ ret = PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return ret;
+}
+
+static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 value)
+{
+ struct faraday_pci *p = bus->sysdata;
+
+ dev_dbg(&bus->dev,
+ "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+
+ return faraday_raw_pci_write_config(p, bus->number, fn, config, size,
+ value);
+}
+
+static struct pci_ops faraday_pci_ops = {
+ .read = faraday_pci_read_config,
+ .write = faraday_pci_write_config,
+};
+
+static void faraday_pci_ack_irq(struct irq_data *d)
+{
+ struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+ unsigned int reg;
+
+ faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
+ reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT);
+ faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_mask_irq(struct irq_data *d)
+{
+ struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+ unsigned int reg;
+
+ faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
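+	/*
+	 * The INTSTS bits are write-one-to-clear, so strip them from the
+	 * value written back to avoid acking unrelated interrupts.
+	 */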
+ reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT)
+ | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT));
+ faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_unmask_irq(struct irq_data *d)
+{
+ struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+ unsigned int reg;
+
+ faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
+ reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT);
+ faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_irq_handler(struct irq_desc *desc)
+{
+ struct faraday_pci *p = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned int irq_stat, reg, i;
+
+ faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT;
+
+ chained_irq_enter(irqchip, desc);
+
+ for (i = 0; i < 4; i++) {
+ if ((irq_stat & BIT(i)) == 0)
+ continue;
+ generic_handle_domain_irq(p->irqdomain, i);
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static struct irq_chip faraday_pci_irq_chip = {
+ .name = "PCI",
+ .irq_ack = faraday_pci_ack_irq,
+ .irq_mask = faraday_pci_mask_irq,
+ .irq_unmask = faraday_pci_unmask_irq,
+};
+
+static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops faraday_pci_irqdomain_ops = {
+ .map = faraday_pci_irq_map,
+};
+
+static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
+{
+ struct device_node *intc = of_get_next_child(p->dev->of_node, NULL);
+ int irq;
+ int i;
+
+ if (!intc) {
+ dev_err(p->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ /* All PCI IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ dev_err(p->dev, "failed to get parent IRQ\n");
+ of_node_put(intc);
+ return irq ?: -EINVAL;
+ }
+
+ p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &faraday_pci_irqdomain_ops, p);
+ of_node_put(intc);
+ if (!p->irqdomain) {
+ dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
+ return -EINVAL;
+ }
+
+ irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p);
+
+ for (i = 0; i < 4; i++)
+ irq_create_mapping(p->irqdomain, i);
+
+ return 0;
+}
+
+static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p)
+{
+ struct device *dev = p->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p);
+ struct resource_entry *entry;
+ u32 confreg[3] = {
+ FARADAY_PCI_MEM1_BASE_SIZE,
+ FARADAY_PCI_MEM2_BASE_SIZE,
+ FARADAY_PCI_MEM3_BASE_SIZE,
+ };
+ int i = 0;
+ u32 val;
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ u64 pci_addr = entry->res->start - entry->offset;
+ u64 end = entry->res->end - entry->offset;
+ int ret;
+
+ ret = faraday_res_to_memcfg(pci_addr,
+ resource_size(entry->res), &val);
+ if (ret) {
+ dev_err(dev,
+ "DMA range %d: illegal MEM resource size\n", i);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
+ i + 1, pci_addr, end, val);
+ if (i <= 2) {
+ faraday_raw_pci_write_config(p, 0, 0, confreg[i],
+ 4, val);
+ } else {
+			dev_err(dev, "ignoring extraneous dma-range %d\n", i);
+ break;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int faraday_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct faraday_pci_variant *variant =
+ of_device_get_match_data(dev);
+ struct resource_entry *win;
+ struct faraday_pci *p;
+ struct resource *io;
+ struct pci_host_bridge *host;
+ struct clk *clk;
+ unsigned char max_bus_speed = PCI_SPEED_33MHz;
+ unsigned char cur_bus_speed = PCI_SPEED_33MHz;
+ int ret;
+ u32 val;
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
+ if (!host)
+ return -ENOMEM;
+
+ host->ops = &faraday_pci_ops;
+ p = pci_host_bridge_priv(host);
+ host->sysdata = p;
+ p->dev = dev;
+
+ /* Retrieve and enable optional clocks */
+ clk = devm_clk_get_enabled(dev, "PCLK");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ p->bus_clk = devm_clk_get_enabled(dev, "PCICLK");
+ if (IS_ERR(p->bus_clk))
+ return PTR_ERR(p->bus_clk);
+
+ p->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(p->base))
+ return PTR_ERR(p->base);
+
+ win = resource_list_first_type(&host->windows, IORESOURCE_IO);
+ if (win) {
+ io = win->res;
+ if (!faraday_res_to_memcfg(io->start - win->offset,
+ resource_size(io), &val)) {
+ /* setup I/O space size */
+ writel(val, p->base + FTPCI_IOSIZE);
+ } else {
+ dev_err(dev, "illegal IO mem size\n");
+ return -EINVAL;
+ }
+ }
+
+	/*
+	 * Setup hostbridge: the low bits of FTPCI_CTRL mirror the PCI
+	 * command register's I/O, memory and bus-master enables, hence
+	 * the reuse of the PCI_COMMAND_* constants below.
+	 */
+ val = readl(p->base + FTPCI_CTRL);
+ val |= PCI_COMMAND_IO;
+ val |= PCI_COMMAND_MEMORY;
+ val |= PCI_COMMAND_MASTER;
+ writel(val, p->base + FTPCI_CTRL);
+	/*
+	 * Mask and clear all interrupts: the 16-bit write to the upper half
+	 * of FARADAY_PCI_CTRL2 zeroes every interrupt mask bit and writes 1
+	 * to the four W1C INTx status bits (0xF000 = bits 28..31).
+	 */
+ faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
+ if (variant->cascaded_irq) {
+ ret = faraday_pci_setup_cascaded_irq(p);
+ if (ret) {
+ dev_err(dev, "failed to setup cascaded IRQ\n");
+ return ret;
+ }
+ }
+
+ /* Check bus clock if we can gear up to 66 MHz */
+ if (!IS_ERR(p->bus_clk)) {
+ unsigned long rate;
+ u32 val;
+
+ faraday_raw_pci_read_config(p, 0, 0,
+ FARADAY_PCI_STATUS_CMD, 4, &val);
+ rate = clk_get_rate(p->bus_clk);
+
+ if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) {
+ dev_info(dev, "33MHz bus is 66MHz capable\n");
+ max_bus_speed = PCI_SPEED_66MHz;
+ ret = clk_set_rate(p->bus_clk, 66000000);
+ if (ret)
+ dev_err(dev, "failed to set bus clock\n");
+ } else {
+ dev_info(dev, "33MHz only bus\n");
+ max_bus_speed = PCI_SPEED_33MHz;
+ }
+
+ /* Bumping the clock may fail so read back the rate */
+ rate = clk_get_rate(p->bus_clk);
+ if (rate == 33000000)
+ cur_bus_speed = PCI_SPEED_33MHz;
+ if (rate == 66000000)
+ cur_bus_speed = PCI_SPEED_66MHz;
+ }
+
+ ret = faraday_pci_parse_map_dma_ranges(p);
+ if (ret)
+ return ret;
+
+ ret = pci_scan_root_bus_bridge(host);
+ if (ret) {
+ dev_err(dev, "failed to scan host: %d\n", ret);
+ return ret;
+ }
+ p->bus = host->bus;
+ p->bus->max_bus_speed = max_bus_speed;
+ p->bus->cur_bus_speed = cur_bus_speed;
+
+ pci_bus_assign_resources(p->bus);
+ pci_bus_add_devices(p->bus);
+
+ return 0;
+}
+
+/*
+ * We encode bridge variants here; we have at least two, so it doesn't
+ * hurt to have infrastructure to encompass future variants as well.
+ */
+static const struct faraday_pci_variant faraday_regular = {
+ .cascaded_irq = true,
+};
+
+static const struct faraday_pci_variant faraday_dual = {
+ .cascaded_irq = false,
+};
+
+static const struct of_device_id faraday_pci_of_match[] = {
+ {
+ .compatible = "faraday,ftpci100",
+ .data = &faraday_regular,
+ },
+ {
+ .compatible = "faraday,ftpci100-dual",
+ .data = &faraday_dual,
+ },
+ {},
+};
+
+static struct platform_driver faraday_pci_driver = {
+ .driver = {
+ .name = "ftpci100",
+ .of_match_table = faraday_pci_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = faraday_pci_probe,
+};
+builtin_platform_driver(faraday_pci_driver);
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
new file mode 100644
index 000000000..6be3266cd
--- /dev/null
+++ b/drivers/pci/controller/pci-host-common.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic PCI host driver common code
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+static void gen_pci_unmap_cfg(void *ptr)
+{
+ pci_ecam_free((struct pci_config_window *)ptr);
+}
+
+static struct pci_config_window *gen_pci_init(struct device *dev,
+ struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops)
+{
+ int err;
+ struct resource cfgres;
+ struct resource_entry *bus;
+ struct pci_config_window *cfg;
+
+ err = of_address_to_resource(dev->of_node, 0, &cfgres);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return ERR_PTR(err);
+ }
+
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return ERR_PTR(-ENODEV);
+
+ cfg = pci_ecam_create(dev, &cfgres, bus->res, ops);
+ if (IS_ERR(cfg))
+ return cfg;
+
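+	/* Ensure the ECAM window is torn down on probe failure or unbind */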
+ err = devm_add_action_or_reset(dev, gen_pci_unmap_cfg, cfg);
+ if (err)
+ return ERR_PTR(err);
+
+ return cfg;
+}
+
+int pci_host_common_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct pci_config_window *cfg;
+ const struct pci_ecam_ops *ops;
+
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, bridge);
+
+ of_pci_check_probe_only();
+
+ /* Parse and map our Configuration Space windows */
+ cfg = gen_pci_init(dev, bridge, ops);
+ if (IS_ERR(cfg))
+ return PTR_ERR(cfg);
+
+ /* Do not reassign resources if probe only */
+ if (!pci_has_flag(PCI_PROBE_ONLY))
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
+
+ bridge->sysdata = cfg;
+ bridge->ops = (struct pci_ops *)&ops->pci_ops;
+ bridge->msi_domain = true;
+
+ return pci_host_probe(bridge);
+}
+EXPORT_SYMBOL_GPL(pci_host_common_probe);
+
+int pci_host_common_remove(struct platform_device *pdev)
+{
+ struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
+
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ pci_unlock_rescan_remove();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_host_common_remove);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
new file mode 100644
index 000000000..63865aeb6
--- /dev/null
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple, generic PCI host controller driver targeting firmware-initialised
+ * systems and virtual machines (e.g. the PCI emulation provided by kvmtool).
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
+ .bus_shift = 16,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static bool pci_dw_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ /*
+ * The Synopsys DesignWare PCIe controller in ECAM mode will not filter
+ * type 0 config TLPs sent to devices 1 and up on its downstream port,
+ * resulting in devices appearing multiple times on bus 0 unless we
+ * filter out those accesses here.
+ */
+ if (bus->number == cfg->busr.start && PCI_SLOT(devfn) > 0)
+ return false;
+
+ return true;
+}
+
+static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ if (!pci_dw_valid_device(bus, devfn))
+ return NULL;
+
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static const struct pci_ecam_ops pci_dw_ecam_bus_ops = {
+ .pci_ops = {
+ .map_bus = pci_dw_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static const struct of_device_id gen_pci_of_match[] = {
+ { .compatible = "pci-host-cam-generic",
+ .data = &gen_pci_cfg_cam_bus_ops },
+
+ { .compatible = "pci-host-ecam-generic",
+ .data = &pci_generic_ecam_ops },
+
+ { .compatible = "marvell,armada8k-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
+ { .compatible = "socionext,synquacer-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
+ { .compatible = "snps,dw-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
+ { },
+};
+MODULE_DEVICE_TABLE(of, gen_pci_of_match);
+
+static struct platform_driver gen_pci_driver = {
+ .driver = {
+ .name = "pci-host-generic",
+ .of_match_table = gen_pci_of_match,
+ },
+ .probe = pci_host_common_probe,
+ .remove = pci_host_common_remove,
+};
+module_platform_driver(gen_pci_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c
new file mode 100644
index 000000000..cc96be450
--- /dev/null
+++ b/drivers/pci/controller/pci-hyperv-intf.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Microsoft Corporation.
+ *
+ * Author:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ *
+ * This small module is a helper driver that allows other drivers to
+ * share a common interface with the Hyper-V PCI frontend driver.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hyperv.h>
+
+struct hyperv_pci_block_ops hvpci_block_ops;
+EXPORT_SYMBOL_GPL(hvpci_block_ops);
+
+int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
+ unsigned int block_id, unsigned int *bytes_returned)
+{
+ if (!hvpci_block_ops.read_block)
+ return -EOPNOTSUPP;
+
+ return hvpci_block_ops.read_block(dev, buf, buf_len, block_id,
+ bytes_returned);
+}
+EXPORT_SYMBOL_GPL(hyperv_read_cfg_blk);
+
+int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
+ unsigned int block_id)
+{
+ if (!hvpci_block_ops.write_block)
+ return -EOPNOTSUPP;
+
+ return hvpci_block_ops.write_block(dev, buf, len, block_id);
+}
+EXPORT_SYMBOL_GPL(hyperv_write_cfg_blk);
+
+int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask))
+{
+ if (!hvpci_block_ops.reg_blk_invalidate)
+ return -EOPNOTSUPP;
+
+ return hvpci_block_ops.reg_blk_invalidate(dev, context,
+ block_invalidate);
+}
+EXPORT_SYMBOL_GPL(hyperv_reg_block_invalidate);
+
+static void __exit exit_hv_pci_intf(void)
+{
+}
+
+static int __init init_hv_pci_intf(void)
+{
+ return 0;
+}
+
+module_init(init_hv_pci_intf);
+module_exit(exit_hv_pci_intf);
+
+MODULE_DESCRIPTION("Hyper-V PCI Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
new file mode 100644
index 000000000..bed3cefda
--- /dev/null
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -0,0 +1,4121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Microsoft Corporation.
+ *
+ * Author:
+ * Jake Oshins <jakeo@microsoft.com>
+ *
+ * This driver acts as a paravirtual front-end for PCI Express root buses.
+ * When a PCI Express function (either an entire device or an SR-IOV
+ * Virtual Function) is being passed through to the VM, this driver exposes
+ * a new bus to the guest VM. This is modeled as a root PCI bus because
+ * no bridges are being exposed to the VM. In fact, with a "Generation 2"
+ * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
+ * until a device has been exposed using this driver.
+ *
+ * Each root PCI bus has its own PCI domain, which is called "Segment" in
+ * the PCI Firmware Specifications. Thus while each device passed through
+ * to the VM using this front-end will appear at "device 0", the domain will
+ * be unique. Typically, each bus will have one PCI function on it, though
+ * this driver does support more than one.
+ *
+ * In order to map the interrupts from the device through to the guest VM,
+ * this driver also implements an IRQ Domain, which handles interrupts (either
+ * MSI or MSI-X) associated with the functions on the bus. As interrupts are
+ * set up, torn down, or reaffined, this driver communicates with the
+ * underlying hypervisor to adjust the mappings in the I/O MMU so that each
+ * interrupt will be delivered to the correct virtual processor at the right
+ * vector. This driver does not support level-triggered (line-based)
+ * interrupts, and will report that the Interrupt Line register in the
+ * function's configuration space is zero.
+ *
+ * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
+ * facilities. For instance, the configuration space of a function exposed
+ * by Hyper-V is mapped into a single page of memory space, and the
+ * read and write handlers for config space must be aware of this mechanism.
+ * Similarly, device setup and teardown involves messages sent to and from
+ * the PCI back-end driver in Hyper-V.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/delay.h>
+#include <linux/semaphore.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/hyperv.h>
+#include <linux/refcount.h>
+#include <linux/irqdomain.h>
+#include <linux/acpi.h>
+#include <asm/mshyperv.h>
+
+/*
+ * Protocol versions. The low word is the minor version, the high word the
+ * major version.
+ */
+
+#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
+#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
+#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
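+
+/* Example: PCI_MAKE_VERSION(1, 4) yields 0x00010004 (major 1, minor 4). */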
+
+enum pci_protocol_version_t {
+ PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
+ PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
+ PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3), /* Vibranium */
+ PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4), /* WS2022 */
+};
+
+#define CPU_AFFINITY_ALL -1ULL
+
+/*
+ * Supported protocol versions in the order of probing - highest go
+ * first.
+ */
+static enum pci_protocol_version_t pci_protocol_versions[] = {
+ PCI_PROTOCOL_VERSION_1_4,
+ PCI_PROTOCOL_VERSION_1_3,
+ PCI_PROTOCOL_VERSION_1_2,
+ PCI_PROTOCOL_VERSION_1_1,
+};
+
+#define PCI_CONFIG_MMIO_LENGTH 0x2000
+#define CFG_PAGE_OFFSET 0x1000
+#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
+
+#define MAX_SUPPORTED_MSI_MESSAGES 0x400
+
+#define STATUS_REVISION_MISMATCH 0xC0000059
+
+/* space for a 32-bit serial number as a string: 10 decimal digits plus NUL */
+#define SLOT_NAME_SIZE 11
+
+/*
+ * Size of requestor for VMbus; the value is based on the observation
+ * that having more than one request outstanding is 'rare', and so 64
+ * should be generous in ensuring that we don't ever run out.
+ */
+#define HV_PCI_RQSTOR_SIZE 64
+
+/*
+ * Message Types
+ */
+
+enum pci_message_type {
+ /*
+ * Version 1.1
+ */
+ PCI_MESSAGE_BASE = 0x42490000,
+ PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0,
+ PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1,
+ PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4,
+ PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
+ PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6,
+ PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7,
+ PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8,
+ PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9,
+ PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA,
+ PCI_EJECT = PCI_MESSAGE_BASE + 0xB,
+ PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC,
+ PCI_REENABLE = PCI_MESSAGE_BASE + 0xD,
+ PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE,
+ PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF,
+ PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10,
+ PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11,
+ PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12,
+ PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13,
+ PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14,
+ PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15,
+ PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16,
+ PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
+ PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
+ PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19,
+ PCI_RESOURCES_ASSIGNED3 = PCI_MESSAGE_BASE + 0x1A,
+ PCI_CREATE_INTERRUPT_MESSAGE3 = PCI_MESSAGE_BASE + 0x1B,
+ PCI_MESSAGE_MAXIMUM
+};
+
+/*
+ * Structures defining the virtual PCI Express protocol.
+ */
+
+union pci_version {
+ struct {
+ u16 minor_version;
+ u16 major_version;
+ } parts;
+ u32 version;
+} __packed;
+
+/*
+ * Function numbers are 8 bits wide on Express, as interpreted through ARI,
+ * which is all this driver does. This representation is the one used in
+ * Windows, which is what is expected when sending this back and forth with
+ * the Hyper-V parent partition.
+ */
+union win_slot_encoding {
+ struct {
+ u32 dev:5;
+ u32 func:3;
+ u32 reserved:24;
+ } bits;
+ u32 slot;
+} __packed;
+
+/*
+ * Pretty much as defined in the PCI Specifications.
+ */
+struct pci_function_description {
+ u16 v_id; /* vendor ID */
+ u16 d_id; /* device ID */
+ u8 rev;
+ u8 prog_intf;
+ u8 subclass;
+ u8 base_class;
+ u32 subsystem_id;
+ union win_slot_encoding win_slot;
+ u32 ser; /* serial number */
+} __packed;
+
+enum pci_device_description_flags {
+ HV_PCI_DEVICE_FLAG_NONE = 0x0,
+ HV_PCI_DEVICE_FLAG_NUMA_AFFINITY = 0x1,
+};
+
+struct pci_function_description2 {
+ u16 v_id; /* vendor ID */
+ u16 d_id; /* device ID */
+ u8 rev;
+ u8 prog_intf;
+ u8 subclass;
+ u8 base_class;
+ u32 subsystem_id;
+ union win_slot_encoding win_slot;
+ u32 ser; /* serial number */
+ u32 flags;
+ u16 virtual_numa_node;
+ u16 reserved;
+} __packed;
+
+/**
+ * struct hv_msi_desc
+ * @vector: IDT entry
+ * @delivery_mode: As defined in Intel's Programmer's
+ * Reference Manual, Volume 3, Chapter 8.
+ * @vector_count: Number of contiguous entries in the
+ * Interrupt Descriptor Table that are
+ * occupied by this Message-Signaled
+ * Interrupt. For "MSI", as first defined
+ * in PCI 2.2, this can be between 1 and
+ * 32. For "MSI-X," as first defined in PCI
+ * 3.0, this must be 1, as each MSI-X table
+ * entry would have its own descriptor.
+ * @reserved: Empty space
+ * @cpu_mask: All the target virtual processors.
+ */
+struct hv_msi_desc {
+ u8 vector;
+ u8 delivery_mode;
+ u16 vector_count;
+ u32 reserved;
+ u64 cpu_mask;
+} __packed;
+
+/**
+ * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
+ * @vector: IDT entry
+ * @delivery_mode: As defined in Intel's Programmer's
+ * Reference Manual, Volume 3, Chapter 8.
+ * @vector_count: Number of contiguous entries in the
+ * Interrupt Descriptor Table that are
+ * occupied by this Message-Signaled
+ * Interrupt. For "MSI", as first defined
+ * in PCI 2.2, this can be between 1 and
+ * 32. For "MSI-X," as first defined in PCI
+ * 3.0, this must be 1, as each MSI-X table
+ * entry would have its own descriptor.
+ * @processor_count: number of bits enabled in array.
+ * @processor_array: All the target virtual processors.
+ */
+struct hv_msi_desc2 {
+ u8 vector;
+ u8 delivery_mode;
+ u16 vector_count;
+ u16 processor_count;
+ u16 processor_array[32];
+} __packed;
+
+/*
+ * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
+ * Everything is the same as in 'hv_msi_desc2' except that the size of the
+ * 'vector' field is larger to support bigger vector values, for example
+ * LPI vectors on ARM.
+ */
+struct hv_msi_desc3 {
+ u32 vector;
+ u8 delivery_mode;
+ u8 reserved;
+ u16 vector_count;
+ u16 processor_count;
+ u16 processor_array[32];
+} __packed;
+
+/**
+ * struct tran_int_desc
+ * @reserved: unused, padding
+ * @vector_count: same as in hv_msi_desc
+ * @data: This is the "data payload" value that is
+ * written by the device when it generates
+ * a message-signaled interrupt, either MSI
+ * or MSI-X.
+ * @address: This is the address to which the data
+ * payload is written on interrupt
+ * generation.
+ */
+struct tran_int_desc {
+ u16 reserved;
+ u16 vector_count;
+ u32 data;
+ u64 address;
+} __packed;
+
+/*
+ * A generic message format for virtual PCI.
+ * Specific message formats are defined later in the file.
+ */
+
+struct pci_message {
+ u32 type;
+} __packed;
+
+struct pci_child_message {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+} __packed;
+
+struct pci_incoming_message {
+ struct vmpacket_descriptor hdr;
+ struct pci_message message_type;
+} __packed;
+
+struct pci_response {
+ struct vmpacket_descriptor hdr;
+ s32 status; /* negative values are failures */
+} __packed;
+
+struct pci_packet {
+ void (*completion_func)(void *context, struct pci_response *resp,
+ int resp_packet_size);
+ void *compl_ctxt;
+
+ struct pci_message message[];
+};
+
+/*
+ * Specific message types supporting the PCI protocol.
+ */
+
+/*
+ * Version negotiation message. Sent from the guest to the host.
+ * The guest is free to try different versions until the host
+ * accepts the version.
+ *
+ * pci_version: The protocol version requested.
+ * is_last_attempt: If TRUE, this is the last version the guest will request.
+ * reservedz: Reserved field, set to zero.
+ */
+
+struct pci_version_request {
+ struct pci_message message_type;
+ u32 protocol_version;
+} __packed;
+
+/*
+ * Bus D0 Entry. This is sent from the guest to the host when the virtual
+ * bus (PCI Express port) is ready for action.
+ */
+
+struct pci_bus_d0_entry {
+ struct pci_message message_type;
+ u32 reserved;
+ u64 mmio_base;
+} __packed;
+
+struct pci_bus_relations {
+ struct pci_incoming_message incoming;
+ u32 device_count;
+ struct pci_function_description func[];
+} __packed;
+
+struct pci_bus_relations2 {
+ struct pci_incoming_message incoming;
+ u32 device_count;
+ struct pci_function_description2 func[];
+} __packed;
+
+struct pci_q_res_req_response {
+ struct vmpacket_descriptor hdr;
+ s32 status; /* negative values are failures */
+ u32 probed_bar[PCI_STD_NUM_BARS];
+} __packed;
+
+struct pci_set_power {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ u32 power_state; /* In Windows terms */
+ u32 reserved;
+} __packed;
+
+struct pci_set_power_response {
+ struct vmpacket_descriptor hdr;
+ s32 status; /* negative values are failures */
+ union win_slot_encoding wslot;
+ u32 resultant_state; /* In Windows terms */
+ u32 reserved;
+} __packed;
+
+struct pci_resources_assigned {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ u8 memory_range[0x14][6]; /* not used here */
+ u32 msi_descriptors;
+ u32 reserved[4];
+} __packed;
+
+struct pci_resources_assigned2 {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ u8 memory_range[0x14][6]; /* not used here */
+ u32 msi_descriptor_count;
+ u8 reserved[70];
+} __packed;
+
+struct pci_create_interrupt {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ struct hv_msi_desc int_desc;
+} __packed;
+
+struct pci_create_int_response {
+ struct pci_response response;
+ u32 reserved;
+ struct tran_int_desc int_desc;
+} __packed;
+
+struct pci_create_interrupt2 {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ struct hv_msi_desc2 int_desc;
+} __packed;
+
+struct pci_create_interrupt3 {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ struct hv_msi_desc3 int_desc;
+} __packed;
+
+struct pci_delete_interrupt {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ struct tran_int_desc int_desc;
+} __packed;
+
+/*
+ * Note: the VM must pass a valid block id, wslot and bytes_requested.
+ */
+struct pci_read_block {
+ struct pci_message message_type;
+ u32 block_id;
+ union win_slot_encoding wslot;
+ u32 bytes_requested;
+} __packed;
+
+struct pci_read_block_response {
+ struct vmpacket_descriptor hdr;
+ u32 status;
+ u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
+} __packed;
+
+/*
+ * Note: the VM must pass a valid block id, wslot and byte_count.
+ */
+struct pci_write_block {
+ struct pci_message message_type;
+ u32 block_id;
+ union win_slot_encoding wslot;
+ u32 byte_count;
+ u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
+} __packed;
+
+struct pci_dev_inval_block {
+ struct pci_incoming_message incoming;
+ union win_slot_encoding wslot;
+ u64 block_mask;
+} __packed;
+
+struct pci_dev_incoming {
+ struct pci_incoming_message incoming;
+ union win_slot_encoding wslot;
+} __packed;
+
+struct pci_eject_response {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ u32 status;
+} __packed;
+
+static int pci_ring_size = (4 * PAGE_SIZE);
+
+/*
+ * Driver specific state.
+ */
+
+enum hv_pcibus_state {
+ hv_pcibus_init = 0,
+ hv_pcibus_probed,
+ hv_pcibus_installed,
+ hv_pcibus_removing,
+ hv_pcibus_maximum
+};
+
+struct hv_pcibus_device {
+#ifdef CONFIG_X86
+ struct pci_sysdata sysdata;
+#elif defined(CONFIG_ARM64)
+ struct pci_config_window sysdata;
+#endif
+ struct pci_host_bridge *bridge;
+ struct fwnode_handle *fwnode;
+ /* Protocol version negotiated with the host */
+ enum pci_protocol_version_t protocol_version;
+
+ struct mutex state_lock;
+ enum hv_pcibus_state state;
+
+ struct hv_device *hdev;
+ resource_size_t low_mmio_space;
+ resource_size_t high_mmio_space;
+ struct resource *mem_config;
+ struct resource *low_mmio_res;
+ struct resource *high_mmio_res;
+ struct completion *survey_event;
+ struct pci_bus *pci_bus;
+ spinlock_t config_lock; /* Avoid two threads writing index page */
+ spinlock_t device_list_lock; /* Protect lists below */
+ void __iomem *cfg_addr;
+
+ struct list_head children;
+ struct list_head dr_list;
+
+ struct msi_domain_info msi_info;
+ struct irq_domain *irq_domain;
+
+ struct workqueue_struct *wq;
+
+ /* Highest slot of child device with resources allocated */
+ int wslot_res_allocated;
+ bool use_calls; /* Use hypercalls to access mmio cfg space */
+};
+
+/*
+ * Tracks "Device Relations" messages from the host, which must be both
+ * processed in order and deferred so that they don't run in the context
+ * of the incoming packet callback.
+ */
+struct hv_dr_work {
+ struct work_struct wrk;
+ struct hv_pcibus_device *bus;
+};
+
+struct hv_pcidev_description {
+ u16 v_id; /* vendor ID */
+ u16 d_id; /* device ID */
+ u8 rev;
+ u8 prog_intf;
+ u8 subclass;
+ u8 base_class;
+ u32 subsystem_id;
+ union win_slot_encoding win_slot;
+ u32 ser; /* serial number */
+ u32 flags;
+ u16 virtual_numa_node;
+};
+
+struct hv_dr_state {
+ struct list_head list_entry;
+ u32 device_count;
+ struct hv_pcidev_description func[];
+};
+
+struct hv_pci_dev {
+ /* List protected by pci_rescan_remove_lock */
+ struct list_head list_entry;
+ refcount_t refs;
+ struct pci_slot *pci_slot;
+ struct hv_pcidev_description desc;
+ bool reported_missing;
+ struct hv_pcibus_device *hbus;
+ struct work_struct wrk;
+
+ void (*block_invalidate)(void *context, u64 block_mask);
+ void *invalidate_context;
+
+ /*
+ * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
+ * read it back, for each of the BAR offsets within config space.
+ */
+ u32 probed_bar[PCI_STD_NUM_BARS];
+};
+
+struct hv_pci_compl {
+ struct completion host_event;
+ s32 completion_status;
+};
+
+static void hv_pci_onchannelcallback(void *context);
+
+#ifdef CONFIG_X86
+#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED
+#define FLOW_HANDLER handle_edge_irq
+#define FLOW_NAME "edge"
+
+static int hv_pci_irqchip_init(void)
+{
+ return 0;
+}
+
+static struct irq_domain *hv_pci_get_root_domain(void)
+{
+ return x86_vector_domain;
+}
+
+static unsigned int hv_msi_get_int_vector(struct irq_data *data)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+
+ return cfg->vector;
+}
+
+#define hv_msi_prepare pci_msi_prepare
+
+/**
+ * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
+ * affinity.
+ * @data: Describes the IRQ
+ *
+ * Build a new destination for the MSI and make a hypercall to
+ * update the Interrupt Redirection Table. "Device Logical ID"
+ * is built out of this PCI bus's instance GUID and the function
+ * number of the device.
+ */
+static void hv_arch_irq_unmask(struct irq_data *data)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
+ struct hv_retarget_device_interrupt *params;
+ struct tran_int_desc *int_desc;
+ struct hv_pcibus_device *hbus;
+ const struct cpumask *dest;
+ cpumask_var_t tmp;
+ struct pci_bus *pbus;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ u32 var_size = 0;
+ int cpu, nr_bank;
+ u64 res;
+
+ dest = irq_data_get_effective_affinity_mask(data);
+ pdev = msi_desc_to_pci_dev(msi_desc);
+ pbus = pdev->bus;
+ hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ int_desc = data->chip_data;
+ if (!int_desc) {
+ dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
+ __func__, data->irq);
+ return;
+ }
+
+ local_irq_save(flags);
+
+ params = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(params, 0, sizeof(*params));
+ params->partition_id = HV_PARTITION_ID_SELF;
+ params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
+ params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
+ params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
+ params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+ (hbus->hdev->dev_instance.b[4] << 16) |
+ (hbus->hdev->dev_instance.b[7] << 8) |
+ (hbus->hdev->dev_instance.b[6] & 0xf8) |
+ PCI_FUNC(pdev->devfn);
+ params->int_target.vector = hv_msi_get_int_vector(data);
+
+ /*
+ * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
+ * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
+ * spurious interrupt storm. Not doing so does not seem to have a
+ * negative effect (yet?).
+ */
+
+ if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+ /*
+ * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
+ * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
+ * with >64 VP support.
+ * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
+ * is not sufficient for this hypercall.
+ */
+ params->int_target.flags |=
+ HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+
+ if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
+ res = 1;
+ goto out;
+ }
+
+ cpumask_and(tmp, dest, cpu_online_mask);
+ nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
+ free_cpumask_var(tmp);
+
+ if (nr_bank <= 0) {
+ res = 1;
+ goto out;
+ }
+
+ /*
+ * var-sized hypercall, var-size starts after vp_mask (thus
+ * vp_set.format does not count, but vp_set.valid_bank_mask
+ * does).
+ */
+ var_size = 1 + nr_bank;
+ } else {
+ for_each_cpu_and(cpu, dest, cpu_online_mask) {
+ params->int_target.vp_mask |=
+ (1ULL << hv_cpu_number_to_vp_number(cpu));
+ }
+ }
+
+ res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
+ params, NULL);
+
+out:
+ local_irq_restore(flags);
+
+ /*
+ * During hibernation, when a CPU is offlined, the kernel tries
+ * to move the interrupt to the remaining CPUs that haven't
+ * been offlined yet. In this case, the below hv_do_hypercall()
+ * always fails since the vmbus channel has been closed:
+ * refer to cpu_disable_common() -> fixup_irqs() ->
+ * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
+ *
+ * Suppress the error message for hibernation because the failure
+ * during hibernation does not matter (at this time all the devices
+ * have been frozen). Note: the correct affinity info is still updated
+ * into the irqdata data structure in migrate_one_irq() ->
+ * irq_do_set_affinity(), so later when the VM resumes,
+ * hv_pci_restore_msi_state() is able to correctly restore the
+ * interrupt with the correct affinity.
+ */
+ if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
+ dev_err(&hbus->hdev->device,
+ "%s() failed: %#llx", __func__, res);
+}
+#elif defined(CONFIG_ARM64)
+/*
+ * SPI vectors to use for vPCI; the architectural SPI range is [32, 1019], but
+ * leave a bit of room at the start for SPIs specified through ACPI, and start
+ * at a power of two to satisfy the power-of-2 multi-MSI requirement.
+ */
+#define HV_PCI_MSI_SPI_START 64
+#define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START)
+#define DELIVERY_MODE 0
+#define FLOW_HANDLER NULL
+#define FLOW_NAME NULL
+#define hv_msi_prepare NULL
+
+struct hv_pci_chip_data {
+ DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
+ struct mutex map_lock;
+};
+
+/* Hyper-V vPCI MSI GIC IRQ domain */
+static struct irq_domain *hv_msi_gic_irq_domain;
+
+/* Hyper-V PCI MSI IRQ chip */
+static struct irq_chip hv_arm64_msi_irq_chip = {
+ .name = "MSI",
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent
+};
+
+static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
+{
+ return irqd->parent_data->hwirq;
+}
+
+/*
+ * @nr_bm_irqs: Indicates the number of IRQs that were allocated from
+ * the bitmap.
+ * @nr_dom_irqs: Indicates the number of IRQs that were allocated from
+ * the parent domain.
+ */
+static void hv_pci_vec_irq_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_bm_irqs,
+ unsigned int nr_dom_irqs)
+{
+ struct hv_pci_chip_data *chip_data = domain->host_data;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ int first = d->hwirq - HV_PCI_MSI_SPI_START;
+ int i;
+
+ mutex_lock(&chip_data->map_lock);
+ bitmap_release_region(chip_data->spi_map,
+ first,
+ get_count_order(nr_bm_irqs));
+ mutex_unlock(&chip_data->map_lock);
+ for (i = 0; i < nr_dom_irqs; i++) {
+ if (i)
+ d = irq_domain_get_irq_data(domain, virq + i);
+ irq_domain_reset_irq_data(d);
+ }
+
+ irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
+}
+
+static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
+}
+
+static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
+ unsigned int nr_irqs,
+ irq_hw_number_t *hwirq)
+{
+ struct hv_pci_chip_data *chip_data = domain->host_data;
+ int index;
+
+ /* Find and allocate region from the SPI bitmap */
+ mutex_lock(&chip_data->map_lock);
+ index = bitmap_find_free_region(chip_data->spi_map,
+ HV_PCI_MSI_SPI_NR,
+ get_count_order(nr_irqs));
+ mutex_unlock(&chip_data->map_lock);
+ if (index < 0)
+ return -ENOSPC;
+
+ *hwirq = index + HV_PCI_MSI_SPI_START;
+
+ return 0;
+}
+
+static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ /*
+ * Since the interrupt specifier is not coming from ACPI or DT, the
+ * trigger type will need to be set explicitly. Otherwise, it will be
+ * set to whatever is in the GIC configuration.
+ */
+ d = irq_domain_get_irq_data(domain->parent, virq);
+
+ return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+}
+
+static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ irq_hw_number_t hwirq;
+ unsigned int i;
+ int ret;
+
+ ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
+ hwirq + i);
+ if (ret) {
+ hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
+ return ret;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i,
+ hwirq + i,
+ &hv_arm64_msi_irq_chip,
+ domain->host_data);
+ pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
+ }
+
+ return 0;
+}
+
+/*
+ * Pick the first cpu as the irq affinity that can be temporarily used for
+ * composing MSI from the hypervisor. GIC will eventually set the right
+ * affinity for the irq and the 'unmask' will retarget the interrupt to that
+ * cpu.
+ */
+static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *irqd, bool reserve)
+{
+ int cpu = cpumask_first(cpu_present_mask);
+
+ irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
+
+ return 0;
+}
+
+static const struct irq_domain_ops hv_pci_domain_ops = {
+ .alloc = hv_pci_vec_irq_domain_alloc,
+ .free = hv_pci_vec_irq_domain_free,
+ .activate = hv_pci_vec_irq_domain_activate,
+};
+
+static int hv_pci_irqchip_init(void)
+{
+ static struct hv_pci_chip_data *chip_data;
+ struct fwnode_handle *fn = NULL;
+ int ret = -ENOMEM;
+
+ chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+ if (!chip_data)
+ return ret;
+
+ mutex_init(&chip_data->map_lock);
+ fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
+ if (!fn)
+ goto free_chip;
+
+ /*
+	 * Once enabled, the IRQ domain should not be removed, since there is
+	 * no way to ensure that all the corresponding devices are gone and
+	 * that no further interrupts will be generated.
+ */
+ hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
+ fn, &hv_pci_domain_ops,
+ chip_data);
+
+ if (!hv_msi_gic_irq_domain) {
+ pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
+ goto free_chip;
+ }
+
+ return 0;
+
+free_chip:
+ kfree(chip_data);
+ if (fn)
+ irq_domain_free_fwnode(fn);
+
+ return ret;
+}
+
+static struct irq_domain *hv_pci_get_root_domain(void)
+{
+ return hv_msi_gic_irq_domain;
+}
+
+/*
+ * SPIs are used for PCI device interrupts and are managed via the GICD
+ * registers, which Hyper-V already supports, so no hypercall is needed.
+ */
+static void hv_arch_irq_unmask(struct irq_data *data) { }
+#endif /* CONFIG_ARM64 */
+
+/**
+ * hv_pci_generic_compl() - Invoked for a completion packet
+ * @context: Set up by the sender of the packet.
+ * @resp: The response packet
+ * @resp_packet_size: Size in bytes of the packet
+ *
+ * This function is used to trigger an event and report status
+ * for any message for which the completion packet contains a
+ * status and nothing else.
+ */
+static void hv_pci_generic_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct hv_pci_compl *comp_pkt = context;
+
+ comp_pkt->completion_status = resp->status;
+ complete(&comp_pkt->host_event);
+}
+
+static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
+ u32 wslot);
+
+static void get_pcichild(struct hv_pci_dev *hpdev)
+{
+ refcount_inc(&hpdev->refs);
+}
+
+static void put_pcichild(struct hv_pci_dev *hpdev)
+{
+ if (refcount_dec_and_test(&hpdev->refs))
+ kfree(hpdev);
+}
+
+/*
+ * There is no good way to get notified from vmbus_onoffer_rescind(),
+ * so let's use polling here, since this is not a hot path.
+ */
+static int wait_for_response(struct hv_device *hdev,
+ struct completion *comp)
+{
+ while (true) {
+ if (hdev->channel->rescind) {
+ dev_warn_once(&hdev->device, "The device is gone.\n");
+ return -ENODEV;
+ }
+
+ if (wait_for_completion_timeout(comp, HZ / 10))
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * devfn_to_wslot() - Convert from Linux PCI slot to Windows
+ * @devfn: The Linux representation of PCI slot
+ *
+ * Windows uses a slightly different representation of PCI slot.
+ *
+ * Return: The Windows representation
+ */
+static u32 devfn_to_wslot(int devfn)
+{
+ union win_slot_encoding wslot;
+
+ wslot.slot = 0;
+ wslot.bits.dev = PCI_SLOT(devfn);
+ wslot.bits.func = PCI_FUNC(devfn);
+
+ return wslot.slot;
+}
+
+/**
+ * wslot_to_devfn() - Convert from Windows PCI slot to Linux
+ * @wslot: The Windows representation of PCI slot
+ *
+ * Windows uses a slightly different representation of PCI slot.
+ *
+ * Return: The Linux representation
+ */
+static int wslot_to_devfn(u32 wslot)
+{
+ union win_slot_encoding slot_no;
+
+ slot_no.slot = wslot;
+ return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
+}
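+
+/*
+ * Worked example: devfn 0x0b (device 1, function 3) encodes as wslot 0x61
+ * (dev = 1 in bits 0-4, func = 3 in bits 5-7), and wslot_to_devfn(0x61)
+ * returns 0x0b again.
+ */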
+
+static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
+{
+ struct hv_mmio_read_input *in;
+ struct hv_mmio_read_output *out;
+ u64 ret;
+
+ /*
+ * Must be called with interrupts disabled so it is safe
+ * to use the per-cpu input argument page. Use it for
+ * both input and output.
+ */
+ in = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
+ in->gpa = gpa;
+ in->size = size;
+
+ ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
+ if (hv_result_success(ret)) {
+ switch (size) {
+ case 1:
+ *val = *(u8 *)(out->data);
+ break;
+ case 2:
+ *val = *(u16 *)(out->data);
+ break;
+ default:
+ *val = *(u32 *)(out->data);
+ break;
+ }
+ } else
+ dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
+ ret, gpa, size);
+}
+
+static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
+{
+ struct hv_mmio_write_input *in;
+ u64 ret;
+
+ /*
+ * Must be called with interrupts disabled so it is safe
+ * to use the per-cpu input argument memory.
+ */
+ in = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ in->gpa = gpa;
+ in->size = size;
+ switch (size) {
+ case 1:
+ *(u8 *)(in->data) = val;
+ break;
+ case 2:
+ *(u16 *)(in->data) = val;
+ break;
+ default:
+ *(u32 *)(in->data) = val;
+ break;
+ }
+
+ ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
+ if (!hv_result_success(ret))
+ dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
+ ret, gpa, size);
+}
+
+/*
+ * PCI Configuration Space for these root PCI buses is implemented as a pair
+ * of pages in memory-mapped I/O space. Writing to the first page chooses
+ * the PCI function being written or read. Once the first page has been
+ * written to, the following page maps in the entire configuration space of
+ * the function.
+ */
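+
+/*
+ * In outline, the MMIO variant implemented below boils down to this
+ * minimal sketch (a 32-bit read, ignoring locking and access size):
+ *
+ *	writel(win_slot, cfg_addr);	select the function
+ *	mb();				order the select vs. the access
+ *	val = readl(cfg_addr + CFG_PAGE_OFFSET + where);
+ */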
+
+/**
+ * _hv_pcifront_read_config() - Internal PCI config read
+ * @hpdev: The PCI driver's representation of the device
+ * @where: Offset within config space
+ * @size: Size of the transfer
+ * @val: Pointer to the buffer receiving the data
+ */
+static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
+ int size, u32 *val)
+{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct device *dev = &hbus->hdev->device;
+ int offset = where + CFG_PAGE_OFFSET;
+ unsigned long flags;
+
+ /*
+ * If the attempt is to read the IDs or the ROM BAR, simulate that.
+ */
+ if (where + size <= PCI_COMMAND) {
+ memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
+ } else if (where >= PCI_CLASS_REVISION && where + size <=
+ PCI_CACHE_LINE_SIZE) {
+ memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
+ PCI_CLASS_REVISION, size);
+ } else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
+ PCI_ROM_ADDRESS) {
+ memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
+ PCI_SUBSYSTEM_VENDOR_ID, size);
+ } else if (where >= PCI_ROM_ADDRESS && where + size <=
+ PCI_CAPABILITY_LIST) {
+ /* ROM BARs are unimplemented */
+ *val = 0;
+ } else if (where >= PCI_INTERRUPT_LINE && where + size <=
+ PCI_INTERRUPT_PIN) {
+ /*
+ * Interrupt Line and Interrupt PIN are hard-wired to zero
+ * because this front-end only supports message-signaled
+ * interrupts.
+ */
+ *val = 0;
+ } else if (where + size <= CFG_PAGE_SIZE) {
+
+ spin_lock_irqsave(&hbus->config_lock, flags);
+ if (hbus->use_calls) {
+ phys_addr_t addr = hbus->mem_config->start + offset;
+
+ hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
+ hpdev->desc.win_slot.slot);
+ hv_pci_read_mmio(dev, addr, size, val);
+ } else {
+ void __iomem *addr = hbus->cfg_addr + offset;
+
+ /* Choose the function to be read. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
+ /* Make sure the function was chosen before reading. */
+ mb();
+ /* Read from that function's config space. */
+ switch (size) {
+ case 1:
+ *val = readb(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ default:
+ *val = readl(addr);
+ break;
+ }
+ /*
+ * Make sure the read was done before we release the
+ * spinlock allowing consecutive reads/writes.
+ */
+ mb();
+ }
+ spin_unlock_irqrestore(&hbus->config_lock, flags);
+ } else {
+ dev_err(dev, "Attempt to read beyond a function's config space.\n");
+ }
+}
+
+static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
+{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct device *dev = &hbus->hdev->device;
+ u32 val;
+ u16 ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hbus->config_lock, flags);
+
+ if (hbus->use_calls) {
+ phys_addr_t addr = hbus->mem_config->start +
+ CFG_PAGE_OFFSET + PCI_VENDOR_ID;
+
+ hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
+ hpdev->desc.win_slot.slot);
+ hv_pci_read_mmio(dev, addr, 2, &val);
+ ret = val; /* Truncates to 16 bits */
+ } else {
+ void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
+ PCI_VENDOR_ID;
+ /* Choose the function to be read. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
+ /* Make sure the function was chosen before we start reading. */
+ mb();
+ /* Read from that function's config space. */
+ ret = readw(addr);
+ /*
+ * mb() is not required here, because the
+ * spin_unlock_irqrestore() is a barrier.
+ */
+ }
+
+ spin_unlock_irqrestore(&hbus->config_lock, flags);
+
+ return ret;
+}
+
+/**
+ * _hv_pcifront_write_config() - Internal PCI config write
+ * @hpdev: The PCI driver's representation of the device
+ * @where: Offset within config space
+ * @size: Size of the transfer
+ * @val: The data being transferred
+ */
+static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
+ int size, u32 val)
+{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct device *dev = &hbus->hdev->device;
+ int offset = where + CFG_PAGE_OFFSET;
+ unsigned long flags;
+
+ if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
+ where + size <= PCI_CAPABILITY_LIST) {
+ /* SSIDs and ROM BARs are read-only */
+ } else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
+ spin_lock_irqsave(&hbus->config_lock, flags);
+
+ if (hbus->use_calls) {
+ phys_addr_t addr = hbus->mem_config->start + offset;
+
+ hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
+ hpdev->desc.win_slot.slot);
+ hv_pci_write_mmio(dev, addr, size, val);
+ } else {
+ void __iomem *addr = hbus->cfg_addr + offset;
+
+ /* Choose the function to write. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
+ /* Make sure the function was chosen before writing. */
+ wmb();
+ /* Write to that function's config space. */
+ switch (size) {
+ case 1:
+ writeb(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ default:
+ writel(val, addr);
+ break;
+ }
+ /*
+ * Make sure the write was done before we release the
+ * spinlock allowing consecutive reads/writes.
+ */
+ mb();
+ }
+ spin_unlock_irqrestore(&hbus->config_lock, flags);
+ } else {
+ dev_err(dev, "Attempt to write beyond a function's config space.\n");
+ }
+}
+
+/**
+ * hv_pcifront_read_config() - Read configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be read
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ * PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct hv_pcibus_device *hbus =
+ container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
+ struct hv_pci_dev *hpdev;
+
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
+ if (!hpdev)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ _hv_pcifront_read_config(hpdev, where, size, val);
+
+ put_pcichild(hpdev);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/**
+ * hv_pcifront_write_config() - Write configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be written to device
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ * PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct hv_pcibus_device *hbus =
+ container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
+ struct hv_pci_dev *hpdev;
+
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
+ if (!hpdev)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ _hv_pcifront_write_config(hpdev, where, size, val);
+
+ put_pcichild(hpdev);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCIe operations */
+static struct pci_ops hv_pcifront_ops = {
+ .read = hv_pcifront_read_config,
+ .write = hv_pcifront_write_config,
+};
+
+/*
+ * Paravirtual backchannel
+ *
+ * Hyper-V SR-IOV provides a backchannel mechanism in software for
+ * communication between a VF driver and a PF driver. These
+ * "configuration blocks" are similar in concept to PCI configuration space,
+ * but instead of doing reads and writes in 32-bit chunks through a very slow
+ * path, packets of up to 128 bytes can be sent or received asynchronously.
+ *
+ * Nearly every SR-IOV device contains just such a communications channel in
+ * hardware, so using this one in software is usually optional. Using the
+ * software channel, however, allows driver implementers to leverage software
+ * tools that fuzz the communications channel looking for vulnerabilities.
+ *
+ * The usage model for these packets puts the responsibility for reading or
+ * writing on the VF driver. The VF driver sends a read or a write packet,
+ * indicating which "block" is being referred to by number.
+ *
+ * If the PF driver wishes to initiate communication, it can "invalidate" one or
+ * more of the first 64 blocks. This invalidation is delivered via a callback
+ * that the VF driver supplies to this driver.
+ *
+ * No protocol is implied, except that supplied by the PF and VF drivers.
+ */
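+
+/*
+ * Hypothetical VF-driver usage of the wrappers exported from
+ * pci-hyperv-intf.c (buffer and block id below are illustrative only):
+ *
+ *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
+ *	unsigned int got;
+ *
+ *	if (!hyperv_read_cfg_blk(pdev, buf, sizeof(buf), 0, &got))
+ *		got bytes of config block 0 are now valid in buf
+ */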
+
+struct hv_read_config_compl {
+ struct hv_pci_compl comp_pkt;
+ void *buf;
+ unsigned int len;
+ unsigned int bytes_returned;
+};
+
+/**
+ * hv_pci_read_config_compl() - Invoked when a response packet
+ * for a read config block operation arrives.
+ * @context: Identifies the read config operation
+ * @resp: The response packet itself
+ * @resp_packet_size: Size in bytes of the response packet
+ */
+static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct hv_read_config_compl *comp = context;
+ struct pci_read_block_response *read_resp =
+ (struct pci_read_block_response *)resp;
+ unsigned int data_len, hdr_len;
+
+ hdr_len = offsetof(struct pci_read_block_response, bytes);
+ if (resp_packet_size < hdr_len) {
+ comp->comp_pkt.completion_status = -1;
+ goto out;
+ }
+
+ data_len = resp_packet_size - hdr_len;
+ if (data_len > 0 && read_resp->status == 0) {
+ comp->bytes_returned = min(comp->len, data_len);
+ memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
+ } else {
+ comp->bytes_returned = 0;
+ }
+
+ comp->comp_pkt.completion_status = read_resp->status;
+out:
+ complete(&comp->comp_pkt.host_event);
+}
+
+/**
+ * hv_read_config_block() - Sends a read config block request to
+ * the back-end driver running in the Hyper-V parent partition.
+ * @pdev: The PCI driver's representation for this device.
+ * @buf: Buffer into which the config block will be copied.
+ * @len: Size in bytes of buf.
+ * @block_id: Identifies the config block which has been requested.
+ * @bytes_returned: Size which came back from the back-end driver.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_read_config_block(struct pci_dev *pdev, void *buf,
+ unsigned int len, unsigned int block_id,
+ unsigned int *bytes_returned)
+{
+ struct hv_pcibus_device *hbus =
+ container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+ sysdata);
+ struct {
+ struct pci_packet pkt;
+ char buf[sizeof(struct pci_read_block)];
+ } pkt;
+ struct hv_read_config_compl comp_pkt;
+ struct pci_read_block *read_blk;
+ int ret;
+
+ if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
+ return -EINVAL;
+
+ init_completion(&comp_pkt.comp_pkt.host_event);
+ comp_pkt.buf = buf;
+ comp_pkt.len = len;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.pkt.completion_func = hv_pci_read_config_compl;
+ pkt.pkt.compl_ctxt = &comp_pkt;
+ read_blk = (struct pci_read_block *)&pkt.pkt.message;
+ read_blk->message_type.type = PCI_READ_BLOCK;
+ read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
+ read_blk->block_id = block_id;
+ read_blk->bytes_requested = len;
+
+ ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
+ sizeof(*read_blk), (unsigned long)&pkt.pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ return ret;
+
+ ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
+ if (ret)
+ return ret;
+
+ if (comp_pkt.comp_pkt.completion_status != 0 ||
+ comp_pkt.bytes_returned == 0) {
+ dev_err(&hbus->hdev->device,
+ "Read Config Block failed: 0x%x, bytes_returned=%d\n",
+ comp_pkt.comp_pkt.completion_status,
+ comp_pkt.bytes_returned);
+ return -EIO;
+ }
+
+ *bytes_returned = comp_pkt.bytes_returned;
+ return 0;
+}
+
+/**
+ * hv_pci_write_config_compl() - Invoked when a response packet for a write
+ * config block operation arrives.
+ * @context: Identifies the write config operation
+ * @resp: The response packet itself
+ * @resp_packet_size: Size in bytes of the response packet
+ */
+static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct hv_pci_compl *comp_pkt = context;
+
+ comp_pkt->completion_status = resp->status;
+ complete(&comp_pkt->host_event);
+}
+
+/**
+ * hv_write_config_block() - Sends a write config block request to the
+ * back-end driver running in the Hyper-V parent partition.
+ * @pdev: The PCI driver's representation for this device.
+ * @buf: Buffer from which the config block will be copied.
+ * @len: Size in bytes of buf.
+ * @block_id: Identifies the config block which is being written.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_write_config_block(struct pci_dev *pdev, void *buf,
+ unsigned int len, unsigned int block_id)
+{
+ struct hv_pcibus_device *hbus =
+ container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+ sysdata);
+ struct {
+ struct pci_packet pkt;
+ char buf[sizeof(struct pci_write_block)];
+ u32 reserved;
+ } pkt;
+ struct hv_pci_compl comp_pkt;
+ struct pci_write_block *write_blk;
+ u32 pkt_size;
+ int ret;
+
+ if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
+ return -EINVAL;
+
+ init_completion(&comp_pkt.host_event);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.pkt.completion_func = hv_pci_write_config_compl;
+ pkt.pkt.compl_ctxt = &comp_pkt;
+ write_blk = (struct pci_write_block *)&pkt.pkt.message;
+ write_blk->message_type.type = PCI_WRITE_BLOCK;
+ write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
+ write_blk->block_id = block_id;
+ write_blk->byte_count = len;
+ memcpy(write_blk->bytes, buf, len);
+ pkt_size = offsetof(struct pci_write_block, bytes) + len;
+ /*
+ * This quirk is required on some hosts shipped around 2018, because
+ * these hosts don't check the pkt_size correctly (new hosts have been
+ * fixed since early 2019). The quirk is also safe on very old hosts
+ * and new hosts, because, on them, what really matters is the length
+ * specified in write_blk->byte_count.
+ */
+ pkt_size += sizeof(pkt.reserved);
+
+ ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
+ (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ return ret;
+
+ ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
+ if (ret)
+ return ret;
+
+ if (comp_pkt.completion_status != 0) {
+ dev_err(&hbus->hdev->device,
+ "Write Config Block failed: 0x%x\n",
+ comp_pkt.completion_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * hv_register_block_invalidate() - Invoked when a config block invalidation
+ * arrives from the back-end driver.
+ * @pdev: The PCI driver's representation for this device.
+ * @context: Identifies the device.
+ * @block_invalidate: Identifies all of the blocks being invalidated.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask))
+{
+ struct hv_pcibus_device *hbus =
+ container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+ sysdata);
+ struct hv_pci_dev *hpdev;
+
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+ if (!hpdev)
+ return -ENODEV;
+
+ hpdev->block_invalidate = block_invalidate;
+ hpdev->invalidate_context = context;
+
+ put_pcichild(hpdev);
+ return 0;
+}
+
+/* Interrupt management hooks */
+static void hv_int_desc_free(struct hv_pci_dev *hpdev,
+ struct tran_int_desc *int_desc)
+{
+ struct pci_delete_interrupt *int_pkt;
+ struct {
+ struct pci_packet pkt;
+ u8 buffer[sizeof(struct pci_delete_interrupt)];
+ } ctxt;
+
+ if (!int_desc->vector_count) {
+ kfree(int_desc);
+ return;
+ }
+ memset(&ctxt, 0, sizeof(ctxt));
+ int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
+ int_pkt->message_type.type =
+ PCI_DELETE_INTERRUPT_MESSAGE;
+ int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
+ int_pkt->int_desc = *int_desc;
+ vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
+ 0, VM_PKT_DATA_INBAND, 0);
+ kfree(int_desc);
+}
+
+/**
+ * hv_msi_free() - Free the MSI.
+ * @domain: The interrupt domain pointer
+ * @info: Extra MSI-related context
+ * @irq: Identifies the IRQ.
+ *
+ * The Hyper-V parent partition and hypervisor are tracking the
+ * messages that are in use, keeping the interrupt redirection
+ * table up to date. This callback sends a message that frees
+ * the IRT entry and related tracking nonsense.
+ */
+static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
+ unsigned int irq)
+{
+ struct hv_pcibus_device *hbus;
+ struct hv_pci_dev *hpdev;
+ struct pci_dev *pdev;
+ struct tran_int_desc *int_desc;
+ struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
+ struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
+
+ pdev = msi_desc_to_pci_dev(msi);
+ hbus = info->data;
+ int_desc = irq_data_get_irq_chip_data(irq_data);
+ if (!int_desc)
+ return;
+
+ irq_data->chip_data = NULL;
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+ if (!hpdev) {
+ kfree(int_desc);
+ return;
+ }
+
+ hv_int_desc_free(hpdev, int_desc);
+ put_pcichild(hpdev);
+}
+
+static void hv_irq_mask(struct irq_data *data)
+{
+ pci_msi_mask_irq(data);
+ if (data->parent_data->chip->irq_mask)
+ irq_chip_mask_parent(data);
+}
+
+static void hv_irq_unmask(struct irq_data *data)
+{
+ hv_arch_irq_unmask(data);
+
+ if (data->parent_data->chip->irq_unmask)
+ irq_chip_unmask_parent(data);
+ pci_msi_unmask_irq(data);
+}
+
+struct compose_comp_ctxt {
+ struct hv_pci_compl comp_pkt;
+ struct tran_int_desc int_desc;
+};
+
+static void hv_pci_compose_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct compose_comp_ctxt *comp_pkt = context;
+ struct pci_create_int_response *int_resp =
+ (struct pci_create_int_response *)resp;
+
+ if (resp_packet_size < sizeof(*int_resp)) {
+ comp_pkt->comp_pkt.completion_status = -1;
+ goto out;
+ }
+ comp_pkt->comp_pkt.completion_status = resp->status;
+ comp_pkt->int_desc = int_resp->int_desc;
+out:
+ complete(&comp_pkt->comp_pkt.host_event);
+}
+
+static u32 hv_compose_msi_req_v1(
+ struct pci_create_interrupt *int_pkt,
+ u32 slot, u8 vector, u16 vector_count)
+{
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
+ int_pkt->wslot.slot = slot;
+ int_pkt->int_desc.vector = vector;
+ int_pkt->int_desc.vector_count = vector_count;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
+
+ /*
+ * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
+ * hv_irq_unmask().
+ */
+ int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+
+ return sizeof(*int_pkt);
+}
+
+/*
+ * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
+ * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
+ * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
+ * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
+ * not irrelevant because Hyper-V chooses the physical CPU to handle the
+ * interrupts based on the vCPU specified in the message sent to the vPCI VSP in
+ * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
+ * but assigning too many vPCI device interrupts to the same pCPU can cause a
+ * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
+ * to spread out the pCPUs that it selects.
+ *
+ * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
+ * to always return the same dummy vCPU, because a second call to
+ * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
+ * new pCPU for the interrupt. But for the multi-MSI case, the second call to
+ * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
+ * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
+ * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
+ * the same pCPU, even though the vCPUs will be spread out by later calls
+ * to hv_irq_unmask(); that is the best we can do now.
+ *
+ * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
+ * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
+ * enhancement is planned for a future version. With that enhancement, the
+ * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
+ * device will be spread across multiple pCPUs.
+ */
+
+/*
+ * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
+ * by subsequent retarget in hv_irq_unmask().
+ */
+static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
+{
+ return cpumask_first_and(affinity, cpu_online_mask);
+}
+
+/*
+ * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
+ */
+static int hv_compose_multi_msi_req_get_cpu(void)
+{
+ static DEFINE_SPINLOCK(multi_msi_cpu_lock);
+
+ /* -1 means starting with CPU 0 */
+ static int cpu_next = -1;
+
+ unsigned long flags;
+ int cpu;
+
+ spin_lock_irqsave(&multi_msi_cpu_lock, flags);
+
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
+ false);
+ cpu = cpu_next;
+
+ spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
+
+ return cpu;
+}
+
+static u32 hv_compose_msi_req_v2(
+ struct pci_create_interrupt2 *int_pkt, int cpu,
+ u32 slot, u8 vector, u16 vector_count)
+{
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
+ int_pkt->wslot.slot = slot;
+ int_pkt->int_desc.vector = vector;
+ int_pkt->int_desc.vector_count = vector_count;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
+ int_pkt->int_desc.processor_array[0] =
+ hv_cpu_number_to_vp_number(cpu);
+ int_pkt->int_desc.processor_count = 1;
+
+ return sizeof(*int_pkt);
+}
+
+static u32 hv_compose_msi_req_v3(
+ struct pci_create_interrupt3 *int_pkt, int cpu,
+ u32 slot, u32 vector, u16 vector_count)
+{
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
+ int_pkt->wslot.slot = slot;
+ int_pkt->int_desc.vector = vector;
+ int_pkt->int_desc.reserved = 0;
+ int_pkt->int_desc.vector_count = vector_count;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
+ int_pkt->int_desc.processor_array[0] =
+ hv_cpu_number_to_vp_number(cpu);
+ int_pkt->int_desc.processor_count = 1;
+
+ return sizeof(*int_pkt);
+}
+
+/**
+ * hv_compose_msi_msg() - Supplies a valid MSI address/data
+ * @data: Everything about this MSI
+ * @msg: Buffer that is filled in by this function
+ *
+ * This function unpacks the IRQ looking for target CPU set, IDT
+ * vector and mode and sends a message to the parent partition
+ * asking for a mapping for that tuple in this partition. The
+ * response supplies a data value and address to which that data
+ * should be written to trigger that interrupt.
+ */
+static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct hv_pcibus_device *hbus;
+ struct vmbus_channel *channel;
+ struct hv_pci_dev *hpdev;
+ struct pci_bus *pbus;
+ struct pci_dev *pdev;
+ const struct cpumask *dest;
+ struct compose_comp_ctxt comp;
+ struct tran_int_desc *int_desc;
+ struct msi_desc *msi_desc;
+ /*
+ * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
+ * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
+ */
+ u16 vector_count;
+ u32 vector;
+ struct {
+ struct pci_packet pci_pkt;
+ union {
+ struct pci_create_interrupt v1;
+ struct pci_create_interrupt2 v2;
+ struct pci_create_interrupt3 v3;
+ } int_pkts;
+ } __packed ctxt;
+ bool multi_msi;
+ u64 trans_id;
+ u32 size;
+ int ret;
+ int cpu;
+
+ msi_desc = irq_data_get_msi_desc(data);
+ multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
+ msi_desc->nvec_used > 1;
+
+ /* Reuse the previous allocation */
+ if (data->chip_data && multi_msi) {
+ int_desc = data->chip_data;
+ msg->address_hi = int_desc->address >> 32;
+ msg->address_lo = int_desc->address & 0xffffffff;
+ msg->data = int_desc->data;
+ return;
+ }
+
+ pdev = msi_desc_to_pci_dev(msi_desc);
+ dest = irq_data_get_effective_affinity_mask(data);
+ pbus = pdev->bus;
+ hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ channel = hbus->hdev->channel;
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+ if (!hpdev)
+ goto return_null_message;
+
+ /* Free any previous message that might have already been composed. */
+ if (data->chip_data && !multi_msi) {
+ int_desc = data->chip_data;
+ data->chip_data = NULL;
+ hv_int_desc_free(hpdev, int_desc);
+ }
+
+ int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
+ if (!int_desc)
+ goto drop_reference;
+
+ if (multi_msi) {
+ /*
+ * If this is not the first MSI of Multi MSI, we already have
+ * a mapping. Can exit early.
+ */
+ if (msi_desc->irq != data->irq) {
+ data->chip_data = int_desc;
+ int_desc->address = msi_desc->msg.address_lo |
+ (u64)msi_desc->msg.address_hi << 32;
+ int_desc->data = msi_desc->msg.data +
+ (data->irq - msi_desc->irq);
+ msg->address_hi = msi_desc->msg.address_hi;
+ msg->address_lo = msi_desc->msg.address_lo;
+ msg->data = int_desc->data;
+ put_pcichild(hpdev);
+ return;
+ }
+ /*
+ * The vector we select here is a dummy value. The correct
+ * value gets sent to the hypervisor in unmask(). This needs
+ * to be aligned with the count, and also not zero. Multi-msi
+ * is powers of 2 up to 32, so 32 will always work here.
+ */
+ vector = 32;
+ vector_count = msi_desc->nvec_used;
+ cpu = hv_compose_multi_msi_req_get_cpu();
+ } else {
+ vector = hv_msi_get_int_vector(data);
+ vector_count = 1;
+ cpu = hv_compose_msi_req_get_cpu(dest);
+ }
+
+ /*
+ * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
+ * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
+ * for better readability.
+ */
+ memset(&ctxt, 0, sizeof(ctxt));
+ init_completion(&comp.comp_pkt.host_event);
+ ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
+ ctxt.pci_pkt.compl_ctxt = &comp;
+
+ switch (hbus->protocol_version) {
+ case PCI_PROTOCOL_VERSION_1_1:
+ size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
+ hpdev->desc.win_slot.slot,
+ (u8)vector,
+ vector_count);
+ break;
+
+ case PCI_PROTOCOL_VERSION_1_2:
+ case PCI_PROTOCOL_VERSION_1_3:
+ size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
+ cpu,
+ hpdev->desc.win_slot.slot,
+ (u8)vector,
+ vector_count);
+ break;
+
+ case PCI_PROTOCOL_VERSION_1_4:
+ size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
+ cpu,
+ hpdev->desc.win_slot.slot,
+ vector,
+ vector_count);
+ break;
+
+ default:
+ /*
+ * As we only negotiate protocol versions known to this driver,
+ * this path should never be hit. However, this is not a hot
+ * path, so we print a message to aid future updates.
+ */
+ dev_err(&hbus->hdev->device,
+ "Unexpected vPCI protocol, update driver.\n");
+ goto free_int_desc;
+ }
+
+ ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
+ size, (unsigned long)&ctxt.pci_pkt,
+ &trans_id, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret) {
+ dev_err(&hbus->hdev->device,
+ "Sending request for interrupt failed: %d\n",
+ ret);
+ goto free_int_desc;
+ }
+
+ /*
+ * Prevents hv_pci_onchannelcallback() from running concurrently
+ * in the tasklet.
+ */
+ tasklet_disable_in_atomic(&channel->callback_event);
+
+ /*
+ * Since this function is called with IRQ locks held, can't
+ * do normal wait for completion; instead poll.
+ */
+ while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
+ unsigned long flags;
+
+ /* 0xFFFF means an invalid PCI VENDOR ID. */
+ if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
+ dev_err_once(&hbus->hdev->device,
+ "the device has gone\n");
+ goto enable_tasklet;
+ }
+
+ /*
+ * Make sure that the ring buffer data structure doesn't get
+ * freed while we dereference the ring buffer pointer. Test
+ * for the channel's onchannel_callback being NULL within a
+ * sched_lock critical section. See also the inline comments
+ * in vmbus_reset_channel_cb().
+ */
+ spin_lock_irqsave(&channel->sched_lock, flags);
+ if (unlikely(channel->onchannel_callback == NULL)) {
+ spin_unlock_irqrestore(&channel->sched_lock, flags);
+ goto enable_tasklet;
+ }
+ hv_pci_onchannelcallback(hbus);
+ spin_unlock_irqrestore(&channel->sched_lock, flags);
+
+ udelay(100);
+ }
+
+ tasklet_enable(&channel->callback_event);
+
+ if (comp.comp_pkt.completion_status < 0) {
+ dev_err(&hbus->hdev->device,
+ "Request for interrupt failed: 0x%x",
+ comp.comp_pkt.completion_status);
+ goto free_int_desc;
+ }
+
+ /*
+ * Record the assignment so that this can be unwound later. Using
+ * irq_set_chip_data() here would be appropriate, but the lock it takes
+ * is already held.
+ */
+ *int_desc = comp.int_desc;
+ data->chip_data = int_desc;
+
+ /* Pass up the result. */
+ msg->address_hi = comp.int_desc.address >> 32;
+ msg->address_lo = comp.int_desc.address & 0xffffffff;
+ msg->data = comp.int_desc.data;
+
+ put_pcichild(hpdev);
+ return;
+
+enable_tasklet:
+ tasklet_enable(&channel->callback_event);
+ /*
+ * The completion packet on the stack becomes invalid after 'return';
+ * remove the ID from the VMbus requestor if the identifier is still
+ * mapped to/associated with the packet. (The identifier could have
+ * been 're-used', i.e., already removed and (re-)mapped.)
+ *
+ * Cf. hv_pci_onchannelcallback().
+ */
+ vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
+free_int_desc:
+ kfree(int_desc);
+drop_reference:
+ put_pcichild(hpdev);
+return_null_message:
+ msg->address_hi = 0;
+ msg->address_lo = 0;
+ msg->data = 0;
+}
+
+/* HW Interrupt Chip Descriptor */
+static struct irq_chip hv_msi_irq_chip = {
+ .name = "Hyper-V PCIe MSI",
+ .irq_compose_msi_msg = hv_compose_msi_msg,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#ifdef CONFIG_X86
+ .irq_ack = irq_chip_ack_parent,
+#elif defined(CONFIG_ARM64)
+ .irq_eoi = irq_chip_eoi_parent,
+#endif
+ .irq_mask = hv_irq_mask,
+ .irq_unmask = hv_irq_unmask,
+};
+
+static struct msi_domain_ops hv_msi_ops = {
+ .msi_prepare = hv_msi_prepare,
+ .msi_free = hv_msi_free,
+};
+
+/**
+ * hv_pcie_init_irq_domain() - Initialize IRQ domain
+ * @hbus: The root PCI bus
+ *
+ * This function creates an IRQ domain which will be used for
+ * interrupts from devices that have been passed through. These
+ * devices only support MSI and MSI-X, not line-based interrupts
+ * or simulations of line-based interrupts through PCIe's
+ * fabric-layer messages. Because interrupts are remapped, we
+ * can support multi-message MSI here.
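+ * (MSI_FLAG_MULTI_PCI_MSI in the flags below advertises exactly that
+ * capability to the MSI core.)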
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
+{
+ hbus->msi_info.chip = &hv_msi_irq_chip;
+ hbus->msi_info.ops = &hv_msi_ops;
+ hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
+ MSI_FLAG_PCI_MSIX);
+ hbus->msi_info.handler = FLOW_HANDLER;
+ hbus->msi_info.handler_name = FLOW_NAME;
+ hbus->msi_info.data = hbus;
+ hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
+ &hbus->msi_info,
+ hv_pci_get_root_domain());
+ if (!hbus->irq_domain) {
+ dev_err(&hbus->hdev->device,
+ "Failed to build an MSI IRQ domain\n");
+ return -ENODEV;
+ }
+
+ dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
+
+ return 0;
+}
+
+/**
+ * get_bar_size() - Get the address space consumed by a BAR
+ * @bar_val: Value that a BAR returned after -1 was written
+ * to it.
+ *
+ * This function returns the size of the BAR, rounded up to 1
+ * page. It has to be rounded up because the hypervisor's page
+ * table entry that maps the BAR into the VM can't specify an
+ * offset within a page. The invariant is that the hypervisor
+ * must place any BAR smaller than a page at the beginning of
+ * a page.
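+ *
+ * For example (illustrative): a 64-bit BAR that probes back as
+ * 0xffffffffffff0000 decodes 1 + ~0xffffffffffff0000 = 0x10000
+ * bytes (64KiB), which round_up() leaves unchanged on 4KiB-page
+ * systems.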
+ *
+ * Return: Size in bytes of the consumed MMIO space.
+ */
+static u64 get_bar_size(u64 bar_val)
+{
+ return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
+ PAGE_SIZE);
+}
+
+/**
+ * survey_child_resources() - Total all MMIO requirements
+ * @hbus: Root PCI bus, as understood by this driver
+ */
+static void survey_child_resources(struct hv_pcibus_device *hbus)
+{
+ struct hv_pci_dev *hpdev;
+ resource_size_t bar_size = 0;
+ unsigned long flags;
+ struct completion *event;
+ u64 bar_val;
+ int i;
+
+ /* If nobody is waiting on the answer, don't compute it. */
+ event = xchg(&hbus->survey_event, NULL);
+ if (!event)
+ return;
+
+ /* If the answer has already been computed, go with it. */
+ if (hbus->low_mmio_space || hbus->high_mmio_space) {
+ complete(event);
+ return;
+ }
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+
+ /*
+ * Due to an interesting quirk of the PCI spec, all memory regions
+ * for a child device are a power of 2 in size and aligned in memory,
+ * so it's sufficient to just add them up without tracking alignment.
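+ *
+ * Illustrative example: children with 8KiB and 4KiB 32-bit BARs
+ * contribute 12KiB to low_mmio_space; packing power-of-2, naturally
+ * aligned BARs in descending size order (see prepopulate_bars())
+ * never needs padding, so the plain sum is a sufficient window size.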
+ */
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
+ dev_err(&hbus->hdev->device,
+ "There's an I/O BAR in this list!\n");
+
+ if (hpdev->probed_bar[i] != 0) {
+ /*
+ * A probed BAR has all the upper bits set that
+ * can be changed.
+ */
+
+ bar_val = hpdev->probed_bar[i];
+ if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ bar_val |=
+ ((u64)hpdev->probed_bar[++i] << 32);
+ else
+ bar_val |= 0xffffffff00000000ULL;
+
+ bar_size = get_bar_size(bar_val);
+
+ if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ hbus->high_mmio_space += bar_size;
+ else
+ hbus->low_mmio_space += bar_size;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+ complete(event);
+}
+
+/**
+ * prepopulate_bars() - Fill in BARs with defaults
+ * @hbus: Root PCI bus, as understood by this driver
+ *
+ * The core PCI driver code seems much, much happier if the BARs
+ * for a device have values upon first scan. So fill them in.
+ * The algorithm below works down from large sizes to small,
+ * attempting to pack the assignments optimally. The assumption,
+ * enforced in other parts of the code, is that the beginning of
+ * the memory-mapped I/O space will be aligned on the largest
+ * BAR size.
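+ *
+ * Illustrative walk-through: with high BARs of 16MiB, 4MiB and 4MiB,
+ * high_size starts at the largest power of 2 not exceeding the 24MiB
+ * total (16MiB), places the 16MiB BAR, then halves to 8MiB (no match)
+ * and to 4MiB, placing the remaining two BARs back to back; every
+ * assignment stays naturally aligned.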
+ */
+static void prepopulate_bars(struct hv_pcibus_device *hbus)
+{
+ resource_size_t high_size = 0;
+ resource_size_t low_size = 0;
+ resource_size_t high_base = 0;
+ resource_size_t low_base = 0;
+ resource_size_t bar_size;
+ struct hv_pci_dev *hpdev;
+ unsigned long flags;
+ u64 bar_val;
+ u32 command;
+ bool high;
+ int i;
+
+ if (hbus->low_mmio_space) {
+ low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
+ low_base = hbus->low_mmio_res->start;
+ }
+
+ if (hbus->high_mmio_space) {
+ high_size = 1ULL <<
+ (63 - __builtin_clzll(hbus->high_mmio_space));
+ high_base = hbus->high_mmio_res->start;
+ }
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+
+ /*
+ * Clear the memory enable bit, in case it's already set. This occurs
+ * in the suspend path of hibernation, where the device is suspended,
+ * resumed and suspended again: see hibernation_snapshot() and
+ * hibernation_platform_enter().
+ *
+ * If the memory enable bit is already set, Hyper-V silently ignores
+ * the BAR updates below, and the related PCI device driver cannot
+ * work, because reading from the device register(s) always returns
+ * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
+ */
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
+ command &= ~PCI_COMMAND_MEMORY;
+ _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
+ }
+
+ /* Pick addresses for the BARs. */
+ do {
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ bar_val = hpdev->probed_bar[i];
+ if (bar_val == 0)
+ continue;
+ high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
+ if (high) {
+ bar_val |=
+ ((u64)hpdev->probed_bar[i + 1]
+ << 32);
+ } else {
+ bar_val |= 0xffffffffULL << 32;
+ }
+ bar_size = get_bar_size(bar_val);
+ if (high) {
+ if (high_size != bar_size) {
+ i++;
+ continue;
+ }
+ _hv_pcifront_write_config(hpdev,
+ PCI_BASE_ADDRESS_0 + (4 * i),
+ 4,
+ (u32)(high_base & 0xffffff00));
+ i++;
+ _hv_pcifront_write_config(hpdev,
+ PCI_BASE_ADDRESS_0 + (4 * i),
+ 4, (u32)(high_base >> 32));
+ high_base += bar_size;
+ } else {
+ if (low_size != bar_size)
+ continue;
+ _hv_pcifront_write_config(hpdev,
+ PCI_BASE_ADDRESS_0 + (4 * i),
+ 4,
+ (u32)(low_base & 0xffffff00));
+ low_base += bar_size;
+ }
+ }
+ if (high_size <= 1 && low_size <= 1) {
+ /*
+ * No need to set the PCI_COMMAND_MEMORY bit as
+ * the core PCI driver doesn't require the bit
+ * to be pre-set. Actually here we intentionally
+ * keep the bit off so that the PCI BAR probing
+ * in the core PCI driver doesn't cause Hyper-V
+ * to unnecessarily unmap/map the virtual BARs
+ * from/to the physical BARs multiple times.
+ * This reduces the VM boot time significantly
+ * if the BAR sizes are huge.
+ */
+ break;
+ }
+ }
+
+ high_size >>= 1;
+ low_size >>= 1;
+ } while (high_size || low_size);
+
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+}
+
+/*
+ * Assign entries in sysfs pci slot directory.
+ *
+ * Note that this function does not need to lock the children list
+ * because it is called from pci_devices_present_work(), which is
+ * serialized with hv_eject_device_work(): both run on the same
+ * ordered workqueue. Therefore the hbus->children list will not change
+ * even when pci_create_slot sleeps.
+ */
+static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
+{
+ struct hv_pci_dev *hpdev;
+ char name[SLOT_NAME_SIZE];
+ int slot_nr;
+
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if (hpdev->pci_slot)
+ continue;
+
+ slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
+ snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
+ hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
+ name, NULL);
+ if (IS_ERR(hpdev->pci_slot)) {
+ pr_warn("pci_create slot %s failed\n", name);
+ hpdev->pci_slot = NULL;
+ }
+ }
+}
+
+/*
+ * Remove entries in sysfs pci slot directory.
+ */
+static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
+{
+ struct hv_pci_dev *hpdev;
+
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if (!hpdev->pci_slot)
+ continue;
+ pci_destroy_slot(hpdev->pci_slot);
+ hpdev->pci_slot = NULL;
+ }
+}
+
+/*
+ * Set NUMA node for the devices on the bus
+ */
+static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
+{
+ struct pci_dev *dev;
+ struct pci_bus *bus = hbus->bridge->bus;
+ struct hv_pci_dev *hv_dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
+ if (!hv_dev)
+ continue;
+
+ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
+ hv_dev->desc.virtual_numa_node < num_possible_nodes())
+ /*
+ * The kernel may boot with some NUMA nodes offline
+ * (e.g. in a KDUMP kernel) or with NUMA disabled via
+ * "numa=off". In those cases, adjust the host provided
+ * NUMA node to a valid NUMA node used by the kernel.
+ */
+ set_dev_node(&dev->dev,
+ numa_map_to_online_node(
+ hv_dev->desc.virtual_numa_node));
+
+ put_pcichild(hv_dev);
+ }
+}
+
+/**
+ * create_root_hv_pci_bus() - Expose a new root PCI bus
+ * @hbus: Root PCI bus, as understood by this driver
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
+{
+ int error;
+ struct pci_host_bridge *bridge = hbus->bridge;
+
+ bridge->dev.parent = &hbus->hdev->device;
+ bridge->sysdata = &hbus->sysdata;
+ bridge->ops = &hv_pcifront_ops;
+
+ error = pci_scan_root_bus_bridge(bridge);
+ if (error)
+ return error;
+
+ pci_lock_rescan_remove();
+ hv_pci_assign_numa_node(hbus);
+ pci_bus_assign_resources(bridge->bus);
+ hv_pci_assign_slots(hbus);
+ pci_bus_add_devices(bridge->bus);
+ pci_unlock_rescan_remove();
+ hbus->state = hv_pcibus_installed;
+ return 0;
+}
+
+struct q_res_req_compl {
+ struct completion host_event;
+ struct hv_pci_dev *hpdev;
+};
+
+/**
+ * q_resource_requirements() - Query Resource Requirements
+ * @context: The completion context.
+ * @resp: The response that came from the host.
+ * @resp_packet_size: The size in bytes of resp.
+ *
+ * This function is invoked on completion of a Query Resource
+ * Requirements packet.
+ */
+static void q_resource_requirements(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct q_res_req_compl *completion = context;
+ struct pci_q_res_req_response *q_res_req =
+ (struct pci_q_res_req_response *)resp;
+ s32 status;
+ int i;
+
+ status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
+ if (status < 0) {
+ dev_err(&completion->hpdev->hbus->hdev->device,
+ "query resource requirements failed: %x\n",
+ status);
+ } else {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ completion->hpdev->probed_bar[i] =
+ q_res_req->probed_bar[i];
+ }
+ }
+
+ complete(&completion->host_event);
+}
+
+/**
+ * new_pcichild_device() - Create a new child device
+ * @hbus: The internal struct tracking this root PCI bus.
+ * @desc: The information supplied so far from the host
+ * about the device.
+ *
+ * This function creates the tracking structure for a new child
+ * device and kicks off the process of figuring out what it is.
+ *
+ * Return: Pointer to the new tracking struct
+ */
+static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
+ struct hv_pcidev_description *desc)
+{
+ struct hv_pci_dev *hpdev;
+ struct pci_child_message *res_req;
+ struct q_res_req_compl comp_pkt;
+ struct {
+ struct pci_packet init_packet;
+ u8 buffer[sizeof(struct pci_child_message)];
+ } pkt;
+ unsigned long flags;
+ int ret;
+
+ hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
+ if (!hpdev)
+ return NULL;
+
+ hpdev->hbus = hbus;
+
+ memset(&pkt, 0, sizeof(pkt));
+ init_completion(&comp_pkt.host_event);
+ comp_pkt.hpdev = hpdev;
+ pkt.init_packet.compl_ctxt = &comp_pkt;
+ pkt.init_packet.completion_func = q_resource_requirements;
+ res_req = (struct pci_child_message *)&pkt.init_packet.message;
+ res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
+ res_req->wslot.slot = desc->win_slot.slot;
+
+ ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
+ sizeof(struct pci_child_message),
+ (unsigned long)&pkt.init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ goto error;
+
+ if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
+ goto error;
+
+ hpdev->desc = *desc;
+ refcount_set(&hpdev->refs, 1);
+ get_pcichild(hpdev);
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+
+ list_add_tail(&hpdev->list_entry, &hbus->children);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+ return hpdev;
+
+error:
+ kfree(hpdev);
+ return NULL;
+}
+
+/**
+ * get_pcichild_wslot() - Find device from slot
+ * @hbus: Root PCI bus, as understood by this driver
+ * @wslot: Location on the bus
+ *
+ * This function looks up a PCI device and returns the internal
+ * representation of it. It acquires a reference on it, so that
+ * the device won't be deleted while somebody is using it. The
+ * caller is responsible for calling put_pcichild() to release
+ * this reference.
+ *
+ * Return: Internal representation of a PCI device
+ */
+static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
+ u32 wslot)
+{
+ unsigned long flags;
+ struct hv_pci_dev *iter, *hpdev = NULL;
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_for_each_entry(iter, &hbus->children, list_entry) {
+ if (iter->desc.win_slot.slot == wslot) {
+ hpdev = iter;
+ get_pcichild(hpdev);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ return hpdev;
+}
+
+/**
+ * pci_devices_present_work() - Handle new list of child devices
+ * @work: Work struct embedded in struct hv_dr_work
+ *
+ * "Bus Relations" is the Windows term for "children of this
+ * bus." The terminology is preserved here for people trying to
+ * debug the interaction between Hyper-V and Linux. This
+ * function is called when the parent partition reports a list
+ * of functions that should be observed under this PCI Express
+ * port (bus).
+ *
+ * This function updates the list, and must tolerate being
+ * called multiple times with the same information. The typical
+ * number of child devices is one, with very atypical cases
+ * involving three or four, so the algorithms used here can be
+ * simple and inefficient.
+ *
+ * It must also treat the omission of a previously observed device as
+ * notification that the device no longer exists.
+ *
+ * Note that this function is serialized with hv_eject_device_work(),
+ * because both are pushed to the ordered workqueue hbus->wq.
+ */
+static void pci_devices_present_work(struct work_struct *work)
+{
+ u32 child_no;
+ bool found;
+ struct hv_pcidev_description *new_desc;
+ struct hv_pci_dev *hpdev;
+ struct hv_pcibus_device *hbus;
+ struct list_head removed;
+ struct hv_dr_work *dr_wrk;
+ struct hv_dr_state *dr = NULL;
+ unsigned long flags;
+
+ dr_wrk = container_of(work, struct hv_dr_work, wrk);
+ hbus = dr_wrk->bus;
+ kfree(dr_wrk);
+
+ INIT_LIST_HEAD(&removed);
+
+ /* Pull this off the queue and process it if it was the last one. */
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ while (!list_empty(&hbus->dr_list)) {
+ dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
+ list_entry);
+ list_del(&dr->list_entry);
+
+ /* Throw this away if the list still has stuff in it. */
+ if (!list_empty(&hbus->dr_list)) {
+ kfree(dr);
+ continue;
+ }
+ }
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ if (!dr)
+ return;
+
+ mutex_lock(&hbus->state_lock);
+
+ /* First, mark all existing children as reported missing. */
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ hpdev->reported_missing = true;
+ }
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ /* Next, add back any reported devices. */
+ for (child_no = 0; child_no < dr->device_count; child_no++) {
+ found = false;
+ new_desc = &dr->func[child_no];
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
+ (hpdev->desc.v_id == new_desc->v_id) &&
+ (hpdev->desc.d_id == new_desc->d_id) &&
+ (hpdev->desc.ser == new_desc->ser)) {
+ hpdev->reported_missing = false;
+ found = true;
+ }
+ }
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ if (!found) {
+ hpdev = new_pcichild_device(hbus, new_desc);
+ if (!hpdev)
+ dev_err(&hbus->hdev->device,
+ "couldn't record a child device.\n");
+ }
+ }
+
+ /* Move missing children to a list on the stack. */
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ do {
+ found = false;
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if (hpdev->reported_missing) {
+ found = true;
+ put_pcichild(hpdev);
+ list_move_tail(&hpdev->list_entry, &removed);
+ break;
+ }
+ }
+ } while (found);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ /* Delete everything that should no longer exist. */
+ while (!list_empty(&removed)) {
+ hpdev = list_first_entry(&removed, struct hv_pci_dev,
+ list_entry);
+ list_del(&hpdev->list_entry);
+
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+
+ put_pcichild(hpdev);
+ }
+
+ switch (hbus->state) {
+ case hv_pcibus_installed:
+ /*
+ * Tell the core to rescan the bus
+ * because there may have been changes.
+ */
+ pci_lock_rescan_remove();
+ pci_scan_child_bus(hbus->bridge->bus);
+ hv_pci_assign_numa_node(hbus);
+ hv_pci_assign_slots(hbus);
+ pci_unlock_rescan_remove();
+ break;
+
+ case hv_pcibus_init:
+ case hv_pcibus_probed:
+ survey_child_resources(hbus);
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&hbus->state_lock);
+
+ kfree(dr);
+}
+
+/**
+ * hv_pci_start_relations_work() - Queue work to start device discovery
+ * @hbus: Root PCI bus, as understood by this driver
+ * @dr: The list of children returned from host
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
+ struct hv_dr_state *dr)
+{
+ struct hv_dr_work *dr_wrk;
+ unsigned long flags;
+ bool pending_dr;
+
+ if (hbus->state == hv_pcibus_removing) {
+ dev_info(&hbus->hdev->device,
+ "PCI VMBus BUS_RELATIONS: ignored\n");
+ return -ENOENT;
+ }
+
+ dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
+ if (!dr_wrk)
+ return -ENOMEM;
+
+ INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
+ dr_wrk->bus = hbus;
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ /*
+ * If pending_dr is true, we have already queued a work item,
+ * which will see the new dr. Otherwise, we need to queue a
+ * new work item.
+ */
+ pending_dr = !list_empty(&hbus->dr_list);
+ list_add_tail(&dr->list_entry, &hbus->dr_list);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ if (pending_dr)
+ kfree(dr_wrk);
+ else
+ queue_work(hbus->wq, &dr_wrk->wrk);
+
+ return 0;
+}
+
+/**
+ * hv_pci_devices_present() - Handle list of new children
+ * @hbus: Root PCI bus, as understood by this driver
+ * @relations: Packet from host listing children
+ *
+ * Process a new list of devices on the bus. The list of devices is
+ * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
+ * whenever a new list of devices for this bus appears.
+ */
+static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
+ struct pci_bus_relations *relations)
+{
+ struct hv_dr_state *dr;
+ int i;
+
+ dr = kzalloc(struct_size(dr, func, relations->device_count),
+ GFP_NOWAIT);
+ if (!dr)
+ return;
+
+ dr->device_count = relations->device_count;
+ for (i = 0; i < dr->device_count; i++) {
+ dr->func[i].v_id = relations->func[i].v_id;
+ dr->func[i].d_id = relations->func[i].d_id;
+ dr->func[i].rev = relations->func[i].rev;
+ dr->func[i].prog_intf = relations->func[i].prog_intf;
+ dr->func[i].subclass = relations->func[i].subclass;
+ dr->func[i].base_class = relations->func[i].base_class;
+ dr->func[i].subsystem_id = relations->func[i].subsystem_id;
+ dr->func[i].win_slot = relations->func[i].win_slot;
+ dr->func[i].ser = relations->func[i].ser;
+ }
+
+ if (hv_pci_start_relations_work(hbus, dr))
+ kfree(dr);
+}
+
+/**
+ * hv_pci_devices_present2() - Handle list of new children
+ * @hbus: Root PCI bus, as understood by this driver
+ * @relations: Packet from host listing children
+ *
+ * This function is the v2 version of hv_pci_devices_present()
+ */
+static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
+ struct pci_bus_relations2 *relations)
+{
+ struct hv_dr_state *dr;
+ int i;
+
+ dr = kzalloc(struct_size(dr, func, relations->device_count),
+ GFP_NOWAIT);
+ if (!dr)
+ return;
+
+ dr->device_count = relations->device_count;
+ for (i = 0; i < dr->device_count; i++) {
+ dr->func[i].v_id = relations->func[i].v_id;
+ dr->func[i].d_id = relations->func[i].d_id;
+ dr->func[i].rev = relations->func[i].rev;
+ dr->func[i].prog_intf = relations->func[i].prog_intf;
+ dr->func[i].subclass = relations->func[i].subclass;
+ dr->func[i].base_class = relations->func[i].base_class;
+ dr->func[i].subsystem_id = relations->func[i].subsystem_id;
+ dr->func[i].win_slot = relations->func[i].win_slot;
+ dr->func[i].ser = relations->func[i].ser;
+ dr->func[i].flags = relations->func[i].flags;
+ dr->func[i].virtual_numa_node =
+ relations->func[i].virtual_numa_node;
+ }
+
+ if (hv_pci_start_relations_work(hbus, dr))
+ kfree(dr);
+}
+
+/**
+ * hv_eject_device_work() - Asynchronously handles ejection
+ * @work: Work struct embedded in internal device struct
+ *
+ * This function handles ejecting a device. Windows will
+ * attempt to gracefully eject a device, waiting 60 seconds to
+ * hear back from the guest OS that this completed successfully.
+ * If this timer expires, the device will be forcibly removed.
+ */
+static void hv_eject_device_work(struct work_struct *work)
+{
+ struct pci_eject_response *ejct_pkt;
+ struct hv_pcibus_device *hbus;
+ struct hv_pci_dev *hpdev;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ int wslot;
+ struct {
+ struct pci_packet pkt;
+ u8 buffer[sizeof(struct pci_eject_response)];
+ } ctxt;
+
+ hpdev = container_of(work, struct hv_pci_dev, wrk);
+ hbus = hpdev->hbus;
+
+ mutex_lock(&hbus->state_lock);
+
+ /*
+ * Ejection can come before or after the PCI bus has been set up, so
+ * attempt to find it and tear down the bus state, if it exists. This
+ * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
+ * because hbus->bridge->bus may not exist yet.
+ */
+ wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
+ pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
+ if (pdev) {
+ pci_lock_rescan_remove();
+ pci_stop_and_remove_bus_device(pdev);
+ pci_dev_put(pdev);
+ pci_unlock_rescan_remove();
+ }
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_del(&hpdev->list_entry);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
+ ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
+ vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
+ sizeof(*ejct_pkt), 0,
+ VM_PKT_DATA_INBAND, 0);
+
+ /* For the get_pcichild() in hv_pci_eject_device() */
+ put_pcichild(hpdev);
+ /* For the two refs got in new_pcichild_device() */
+ put_pcichild(hpdev);
+ put_pcichild(hpdev);
+ /* hpdev has been freed. Do not use it any more. */
+
+ mutex_unlock(&hbus->state_lock);
+}
+
+/**
+ * hv_pci_eject_device() - Handles device ejection
+ * @hpdev: Internal device tracking struct
+ *
+ * This function is invoked when an ejection packet arrives. It
+ * just schedules work so that we don't re-enter the packet
+ * delivery code handling the ejection.
+ */
+static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
+{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct hv_device *hdev = hbus->hdev;
+
+ if (hbus->state == hv_pcibus_removing) {
+ dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
+ return;
+ }
+
+ get_pcichild(hpdev);
+ INIT_WORK(&hpdev->wrk, hv_eject_device_work);
+ queue_work(hbus->wq, &hpdev->wrk);
+}
+
+/**
+ * hv_pci_onchannelcallback() - Handles incoming packets
+ * @context: Internal bus tracking struct
+ *
+ * This function is invoked whenever the host sends a packet to
+ * this channel (which is private to this root PCI bus).
+ */
+static void hv_pci_onchannelcallback(void *context)
+{
+ const int packet_size = 0x100;
+ int ret;
+ struct hv_pcibus_device *hbus = context;
+ struct vmbus_channel *chan = hbus->hdev->channel;
+ u32 bytes_recvd;
+ u64 req_id, req_addr;
+ struct vmpacket_descriptor *desc;
+ unsigned char *buffer;
+ int bufferlen = packet_size;
+ struct pci_packet *comp_packet;
+ struct pci_response *response;
+ struct pci_incoming_message *new_message;
+ struct pci_bus_relations *bus_rel;
+ struct pci_bus_relations2 *bus_rel2;
+ struct pci_dev_inval_block *inval;
+ struct pci_dev_incoming *dev_message;
+ struct hv_pci_dev *hpdev;
+ unsigned long flags;
+
+ buffer = kmalloc(bufferlen, GFP_ATOMIC);
+ if (!buffer)
+ return;
+
+ while (1) {
+ ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
+ &bytes_recvd, &req_id);
+
+ if (ret == -ENOBUFS) {
+ kfree(buffer);
+ /* Handle large packet */
+ bufferlen = bytes_recvd;
+ buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+ if (!buffer)
+ return;
+ continue;
+ }
+
+ /* Zero length indicates there are no more packets. */
+ if (ret || !bytes_recvd)
+ break;
+
+ /*
+ * All incoming packets must be at least as large as a
+ * response.
+ */
+ if (bytes_recvd <= sizeof(struct pci_response))
+ continue;
+ desc = (struct vmpacket_descriptor *)buffer;
+
+ switch (desc->type) {
+ case VM_PKT_COMP:
+
+ lock_requestor(chan, flags);
+ req_addr = __vmbus_request_addr_match(chan, req_id,
+ VMBUS_RQST_ADDR_ANY);
+ if (req_addr == VMBUS_RQST_ERROR) {
+ unlock_requestor(chan, flags);
+ dev_err(&hbus->hdev->device,
+ "Invalid transaction ID %llx\n",
+ req_id);
+ break;
+ }
+ comp_packet = (struct pci_packet *)req_addr;
+ response = (struct pci_response *)buffer;
+ /*
+ * Call ->completion_func() within the critical section to make
+ * sure that the packet pointer is still valid during the call:
+ * here 'valid' means that there's a task still waiting for the
+ * completion, and that the packet data is still on the waiting
+ * task's stack. Cf. hv_compose_msi_msg().
+ */
+ comp_packet->completion_func(comp_packet->compl_ctxt,
+ response,
+ bytes_recvd);
+ unlock_requestor(chan, flags);
+ break;
+
+ case VM_PKT_DATA_INBAND:
+
+ new_message = (struct pci_incoming_message *)buffer;
+ switch (new_message->message_type.type) {
+ case PCI_BUS_RELATIONS:
+
+ bus_rel = (struct pci_bus_relations *)buffer;
+ if (bytes_recvd < sizeof(*bus_rel) ||
+ bytes_recvd <
+ struct_size(bus_rel, func,
+ bus_rel->device_count)) {
+ dev_err(&hbus->hdev->device,
+ "bus relations too small\n");
+ break;
+ }
+
+ hv_pci_devices_present(hbus, bus_rel);
+ break;
+
+ case PCI_BUS_RELATIONS2:
+
+ bus_rel2 = (struct pci_bus_relations2 *)buffer;
+ if (bytes_recvd < sizeof(*bus_rel2) ||
+ bytes_recvd <
+ struct_size(bus_rel2, func,
+ bus_rel2->device_count)) {
+ dev_err(&hbus->hdev->device,
+ "bus relations v2 too small\n");
+ break;
+ }
+
+ hv_pci_devices_present2(hbus, bus_rel2);
+ break;
+
+ case PCI_EJECT:
+
+ dev_message = (struct pci_dev_incoming *)buffer;
+ if (bytes_recvd < sizeof(*dev_message)) {
+ dev_err(&hbus->hdev->device,
+ "eject message too small\n");
+ break;
+ }
+ hpdev = get_pcichild_wslot(hbus,
+ dev_message->wslot.slot);
+ if (hpdev) {
+ hv_pci_eject_device(hpdev);
+ put_pcichild(hpdev);
+ }
+ break;
+
+ case PCI_INVALIDATE_BLOCK:
+
+ inval = (struct pci_dev_inval_block *)buffer;
+ if (bytes_recvd < sizeof(*inval)) {
+ dev_err(&hbus->hdev->device,
+ "invalidate message too small\n");
+ break;
+ }
+ hpdev = get_pcichild_wslot(hbus,
+ inval->wslot.slot);
+ if (hpdev) {
+ if (hpdev->block_invalidate) {
+ hpdev->block_invalidate(
+ hpdev->invalidate_context,
+ inval->block_mask);
+ }
+ put_pcichild(hpdev);
+ }
+ break;
+
+ default:
+ dev_warn(&hbus->hdev->device,
+ "Unimplemented protocol message %x\n",
+ new_message->message_type.type);
+ break;
+ }
+ break;
+
+ default:
+ dev_err(&hbus->hdev->device,
+ "unhandled packet type %d, tid %llx len %d\n",
+ desc->type, req_id, bytes_recvd);
+ break;
+ }
+ }
+
+ kfree(buffer);
+}
+
+/**
+ * hv_pci_protocol_negotiation() - Set up protocol
+ * @hdev: VMBus's tracking struct for this root PCI bus.
+ * @version: Array of supported channel protocol versions in
+ * the order of probing - highest goes first.
+ * @num_version: Number of elements in the version array.
+ *
+ * This driver is intended to support running on Windows 10
+ * (server) and later versions. It will not run on earlier
+ * versions, as they assume that many of the operations which
+ * Linux needs accomplished with a spinlock held were done via
+ * asynchronous messaging via VMBus. Windows 10 increases the
+ * surface area of PCI emulation so that these actions can take
+ * place by suspending a virtual processor for their duration.
+ *
+ * This function negotiates the channel protocol version,
+ * failing if the host doesn't support the necessary protocol
+ * level.
+ */
+static int hv_pci_protocol_negotiation(struct hv_device *hdev,
+ enum pci_protocol_version_t version[],
+ int num_version)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct pci_version_request *version_req;
+ struct hv_pci_compl comp_pkt;
+ struct pci_packet *pkt;
+ int ret;
+ int i;
+
+ /*
+ * Initiate the handshake with the host and negotiate
+ * a version that the host can support. We start with the
+ * highest version number and go down if the host cannot
+ * support it.
+ */
+ pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ init_completion(&comp_pkt.host_event);
+ pkt->completion_func = hv_pci_generic_compl;
+ pkt->compl_ctxt = &comp_pkt;
+ version_req = (struct pci_version_request *)&pkt->message;
+ version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
+
+ for (i = 0; i < num_version; i++) {
+ version_req->protocol_version = version[i];
+ ret = vmbus_sendpacket(hdev->channel, version_req,
+ sizeof(struct pci_version_request),
+ (unsigned long)pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+
+ if (ret) {
+ dev_err(&hdev->device,
+ "PCI Pass-through VSP failed to request version: %d",
+ ret);
+ goto exit;
+ }
+
+ if (comp_pkt.completion_status >= 0) {
+ hbus->protocol_version = version[i];
+ dev_info(&hdev->device,
+ "PCI VMBus probing: Using version %#x\n",
+ hbus->protocol_version);
+ goto exit;
+ }
+
+ if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
+ dev_err(&hdev->device,
+ "PCI Pass-through VSP failed version request: %#x",
+ comp_pkt.completion_status);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ reinit_completion(&comp_pkt.host_event);
+ }
+
+ dev_err(&hdev->device,
+ "PCI pass-through VSP failed to find supported version");
+ ret = -EPROTO;
+
+exit:
+ kfree(pkt);
+ return ret;
+}
+
+/**
+ * hv_pci_free_bridge_windows() - Release memory regions for the
+ * bus
+ * @hbus: Root PCI bus, as understood by this driver
+ */
+static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
+{
+ /*
+ * Set the resources back to the way they looked when they
+ * were allocated by setting IORESOURCE_BUSY again.
+ */
+
+ if (hbus->low_mmio_space && hbus->low_mmio_res) {
+ hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
+ vmbus_free_mmio(hbus->low_mmio_res->start,
+ resource_size(hbus->low_mmio_res));
+ }
+
+ if (hbus->high_mmio_space && hbus->high_mmio_res) {
+ hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
+ vmbus_free_mmio(hbus->high_mmio_res->start,
+ resource_size(hbus->high_mmio_res));
+ }
+}
+
+/**
+ * hv_pci_allocate_bridge_windows() - Allocate memory regions
+ * for the bus
+ * @hbus: Root PCI bus, as understood by this driver
+ *
+ * This function calls vmbus_allocate_mmio(), which is itself a
+ * bit of a compromise. Ideally, we might change the pnp layer
+ * in the kernel such that it comprehends either PCI devices
+ * which are "grandchildren of ACPI," with some intermediate bus
+ * node (in this case, VMBus) or change it such that it
+ * understands VMBus. The pnp layer, however, has been declared
+ * deprecated, and not subject to change.
+ *
+ * The workaround, implemented here, is to ask VMBus to allocate
+ * MMIO space for this bus. VMBus itself knows which ranges are
+ * appropriate by looking at its own ACPI objects. Then, after
+ * these ranges are claimed, they're modified to look like they
+ * would have looked if the ACPI and pnp code had allocated
+ * bridge windows. These descriptors have to exist in this form
+ * in order to satisfy the code which will get invoked when the
+ * endpoint PCI function driver calls request_mem_region() or
+ * request_mem_region_exclusive().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
+{
+ resource_size_t align;
+ int ret;
+
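+ /*
+ * Align each window to the largest power of 2 not exceeding the
+ * requested size. Since every child BAR is a power of 2 no larger
+ * than the total surveyed space, this guarantees that each BAR can
+ * be placed naturally aligned inside the window.
+ */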
+ if (hbus->low_mmio_space) {
+ align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
+ ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
+ (u64)(u32)0xffffffff,
+ hbus->low_mmio_space,
+ align, false);
+ if (ret) {
+ dev_err(&hbus->hdev->device,
+ "Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
+ hbus->low_mmio_space);
+ return ret;
+ }
+
+ /* Modify this resource to become a bridge window. */
+ hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
+ hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
+ pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
+ }
+
+ if (hbus->high_mmio_space) {
+ align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
+ ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
+ 0x100000000, -1,
+ hbus->high_mmio_space, align,
+ false);
+ if (ret) {
+ dev_err(&hbus->hdev->device,
+ "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
+ hbus->high_mmio_space);
+ goto release_low_mmio;
+ }
+
+ /* Modify this resource to become a bridge window. */
+ hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
+ hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
+ pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
+ }
+
+ return 0;
+
+release_low_mmio:
+ if (hbus->low_mmio_res) {
+ vmbus_free_mmio(hbus->low_mmio_res->start,
+ resource_size(hbus->low_mmio_res));
+ }
+
+ return ret;
+}
+
+/**
+ * hv_allocate_config_window() - Find MMIO space for PCI Config
+ * @hbus: Root PCI bus, as understood by this driver
+ *
+ * This function claims memory-mapped I/O space for accessing
+ * configuration space for the functions on this bus.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
+{
+ int ret;
+
+ /*
+ * Set up a region of MMIO space to use for accessing configuration
+ * space.
+ */
+ ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
+ PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
+ if (ret)
+ return ret;
+
+ /*
+ * vmbus_allocate_mmio() gets used for allocating both device endpoint
+ * resource claims (those which cannot be overlapped) and the ranges
+ * which are valid for the children of this bus, which are intended
+ * to be overlapped by those children. Set the flag on this claim
+ * meaning that this region can't be overlapped.
+ */
+
+ hbus->mem_config->flags |= IORESOURCE_BUSY;
+
+ return 0;
+}
+
+static void hv_free_config_window(struct hv_pcibus_device *hbus)
+{
+ vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
+}
+
+static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
+
+/**
+ * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_enter_d0(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct pci_bus_d0_entry *d0_entry;
+ struct hv_pci_compl comp_pkt;
+ struct pci_packet *pkt;
+ bool retry = true;
+ int ret;
+
+enter_d0_retry:
+ /*
+ * Tell the host that the bus is ready to use, and moved into the
+ * powered-on state. This includes telling the host which region
+ * of memory-mapped I/O space has been chosen for configuration space
+ * access.
+ */
+ pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ init_completion(&comp_pkt.host_event);
+ pkt->completion_func = hv_pci_generic_compl;
+ pkt->compl_ctxt = &comp_pkt;
+ d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
+ d0_entry->message_type.type = PCI_BUS_D0ENTRY;
+ d0_entry->mmio_base = hbus->mem_config->start;
+
+ ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
+ (unsigned long)pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+
+ if (ret)
+ goto exit;
+
+ /*
+ * In certain cases (e.g. kdump) the PCI device of interest was
+ * not cleanly shut down and its resources are still held on the
+ * host side, so the host could return an invalid device status.
+ * We need to explicitly request the host to release the resources
+ * and try to enter D0 again.
+ */
+ if (comp_pkt.completion_status < 0 && retry) {
+ retry = false;
+
+ dev_err(&hdev->device, "Retrying D0 Entry\n");
+
+ /*
+ * hv_pci_bus_exit() calls hv_send_resources_released()
+ * to free up resources of its child devices.
+ * In the kdump kernel we need to set the
+ * wslot_res_allocated to 255 so it scans all child
+ * devices to release resources allocated in the
+ * normal kernel before panic happened.
+ */
+ hbus->wslot_res_allocated = 255;
+
+ ret = hv_pci_bus_exit(hdev, true);
+
+ if (ret == 0) {
+ kfree(pkt);
+ goto enter_d0_retry;
+ }
+ dev_err(&hdev->device,
+ "Retrying D0 failed with ret %d\n", ret);
+ }
+
+ if (comp_pkt.completion_status < 0) {
+ dev_err(&hdev->device,
+ "PCI Pass-through VSP failed D0 Entry with status %x\n",
+ comp_pkt.completion_status);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ ret = 0;
+
+exit:
+ kfree(pkt);
+ return ret;
+}
+
+/**
+ * hv_pci_query_relations() - Ask host to send list of child
+ * devices
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_query_relations(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct pci_message message;
+ struct completion comp;
+ int ret;
+
+ /* Ask the host to send along the list of child devices */
+ init_completion(&comp);
+ if (cmpxchg(&hbus->survey_event, NULL, &comp))
+ return -ENOTEMPTY;
+
+ memset(&message, 0, sizeof(message));
+ message.type = PCI_QUERY_BUS_RELATIONS;
+
+ ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
+ 0, VM_PKT_DATA_INBAND, 0);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp);
+
+ /*
+ * In the case of fast device addition/removal, it's possible that
+ * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
+ * already got a PCI_BUS_RELATIONS* message from the host and the
+ * channel callback already scheduled a work to hbus->wq, which can be
+ * running pci_devices_present_work() -> survey_child_resources() ->
+ * complete(&hbus->survey_event), even after hv_pci_query_relations()
+ * exits and the stack variable 'comp' is no longer valid; as a result,
+ * a hang or a page fault may happen when the complete() calls
+ * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
+ * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
+ * -ENODEV, there can't be any more work item scheduled to hbus->wq
+ * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
+ * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
+ * channel->rescind = true.
+ */
+ flush_workqueue(hbus->wq);
+
+ return ret;
+}
+
+/**
+ * hv_send_resources_allocated() - Report local resource choices
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * The host OS is expecting to be sent a request as a message
+ * which contains all the resources that the device will use.
+ * The response contains those same resources, "translated"
+ * which is to say, the values which should be used by the
+ * hardware, when it delivers an interrupt. (MMIO resources are
+ * used in local terms.) This is nice for Windows, and lines up
+ * with the FDO/PDO split, which doesn't exist in Linux. Linux
+ * is deeply expecting to scan an emulated PCI configuration
+ * space. So this message is sent here only to drive the state
+ * machine on the host forward.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_send_resources_allocated(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct pci_resources_assigned *res_assigned;
+ struct pci_resources_assigned2 *res_assigned2;
+ struct hv_pci_compl comp_pkt;
+ struct hv_pci_dev *hpdev;
+ struct pci_packet *pkt;
+ size_t size_res;
+ int wslot;
+ int ret;
+
+ size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
+ ? sizeof(*res_assigned) : sizeof(*res_assigned2);
+
+ pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ ret = 0;
+
+ for (wslot = 0; wslot < 256; wslot++) {
+ hpdev = get_pcichild_wslot(hbus, wslot);
+ if (!hpdev)
+ continue;
+
+ memset(pkt, 0, sizeof(*pkt) + size_res);
+ init_completion(&comp_pkt.host_event);
+ pkt->completion_func = hv_pci_generic_compl;
+ pkt->compl_ctxt = &comp_pkt;
+
+ if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+ res_assigned =
+ (struct pci_resources_assigned *)&pkt->message;
+ res_assigned->message_type.type =
+ PCI_RESOURCES_ASSIGNED;
+ res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
+ } else {
+ res_assigned2 =
+ (struct pci_resources_assigned2 *)&pkt->message;
+ res_assigned2->message_type.type =
+ PCI_RESOURCES_ASSIGNED2;
+ res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
+ }
+ put_pcichild(hpdev);
+
+ ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+ size_res, (unsigned long)pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+ if (ret)
+ break;
+
+ if (comp_pkt.completion_status < 0) {
+ ret = -EPROTO;
+ dev_err(&hdev->device,
+ "resource allocated returned 0x%x",
+ comp_pkt.completion_status);
+ break;
+ }
+
+ hbus->wslot_res_allocated = wslot;
+ }
+
+ kfree(pkt);
+ return ret;
+}
+
+/**
+ * hv_send_resources_released() - Report local resources
+ * released
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_send_resources_released(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct pci_child_message pkt;
+ struct hv_pci_dev *hpdev;
+ int wslot;
+ int ret;
+
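+ /*
+ * Walk the slots in descending order, undoing the ascending walk in
+ * hv_send_resources_allocated(). wslot_res_allocated always names the
+ * highest slot that still holds a host-side allocation, which is also
+ * why the kdump retry path in hv_pci_enter_d0() sets it to 255 to
+ * force a full scan.
+ */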
+ for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
+ hpdev = get_pcichild_wslot(hbus, wslot);
+ if (!hpdev)
+ continue;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.message_type.type = PCI_RESOURCES_RELEASED;
+ pkt.wslot.slot = hpdev->desc.win_slot.slot;
+
+ put_pcichild(hpdev);
+
+ ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
+ VM_PKT_DATA_INBAND, 0);
+ if (ret)
+ return ret;
+
+ hbus->wslot_res_allocated = wslot - 1;
+ }
+
+ hbus->wslot_res_allocated = -1;
+
+ return 0;
+}
+
+#define HVPCI_DOM_MAP_SIZE (64 * 1024)
+static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
+
+/*
+ * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
+ * as invalid for passthrough PCI devices of this driver.
+ */
+#define HVPCI_DOM_INVALID 0
+
+/**
+ * hv_get_dom_num() - Get a valid PCI domain number
+ * @dom: Requested domain number
+ *
+ * Check if the requested PCI domain number is in use, and return another
+ * number if it is.
+ *
+ * Return: domain number on success, HVPCI_DOM_INVALID on failure
+ */
+static u16 hv_get_dom_num(u16 dom)
+{
+ unsigned int i;
+
+ if (test_and_set_bit(dom, hvpci_dom_map) == 0)
+ return dom;
+
+ for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
+ if (test_and_set_bit(i, hvpci_dom_map) == 0)
+ return i;
+ }
+
+ return HVPCI_DOM_INVALID;
+}
+
+/**
+ * hv_put_dom_num() - Mark the PCI domain number as free
+ * @dom: Domain number to be freed
+ */
+static void hv_put_dom_num(u16 dom)
+{
+ clear_bit(dom, hvpci_dom_map);
+}
+
+/**
+ * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ * @dev_id: Identifies the device itself
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_probe(struct hv_device *hdev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct pci_host_bridge *bridge;
+ struct hv_pcibus_device *hbus;
+ u16 dom_req, dom;
+ char *name;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
+ if (!hbus)
+ return -ENOMEM;
+
+ hbus->bridge = bridge;
+ mutex_init(&hbus->state_lock);
+ hbus->state = hv_pcibus_init;
+ hbus->wslot_res_allocated = -1;
+
+ /*
+ * The PCI bus "domain" is what is called "segment" in ACPI and other
+ * specs. Pull it from the instance ID, to get something usually
+ * unique. In rare cases of collision, we will find out another number
+ * not in use.
+ *
+ * Note that, since this code only runs in a Hyper-V VM, Hyper-V
+ * together with this guest driver can guarantee that (1) The only
+ * domain used by Gen1 VMs for something that looks like a physical
+ * PCI bus (which is actually emulated by the hypervisor) is domain 0.
+ * (2) There will be no overlap between domains (after fixing possible
+ * collisions) in the same VM.
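+ *
+ * For example (illustrative): an instance GUID whose bytes 4 and 5
+ * are 0x34 and 0x12 yields dom_req 0x1234.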
+ */
+ dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
+ dom = hv_get_dom_num(dom_req);
+
+ if (dom == HVPCI_DOM_INVALID) {
+ dev_err(&hdev->device,
+ "Unable to use dom# 0x%x or other numbers", dom_req);
+ ret = -EINVAL;
+ goto free_bus;
+ }
+
+ if (dom != dom_req)
+ dev_info(&hdev->device,
+ "PCI dom# 0x%x has collision, using 0x%x",
+ dom_req, dom);
+
+ hbus->bridge->domain_nr = dom;
+#ifdef CONFIG_X86
+ hbus->sysdata.domain = dom;
+ hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
+#elif defined(CONFIG_ARM64)
+ /*
+ * Set the PCI bus parent to be the corresponding VMbus
+ * device. Then the VMbus device will be assigned as the
+ * ACPI companion in pcibios_root_bridge_prepare() and
+ * pci_dma_configure() will propagate device coherence
+ * information to devices created on the bus.
+ */
+ hbus->sysdata.parent = hdev->device.parent;
+ hbus->use_calls = false;
+#endif
+
+ hbus->hdev = hdev;
+ INIT_LIST_HEAD(&hbus->children);
+ INIT_LIST_HEAD(&hbus->dr_list);
+ spin_lock_init(&hbus->config_lock);
+ spin_lock_init(&hbus->device_list_lock);
+ hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
+ hbus->bridge->domain_nr);
+ if (!hbus->wq) {
+ ret = -ENOMEM;
+ goto free_dom;
+ }
+
+ hdev->channel->next_request_id_callback = vmbus_next_request_id;
+ hdev->channel->request_addr_callback = vmbus_request_addr;
+ hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
+
+ ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
+ hv_pci_onchannelcallback, hbus);
+ if (ret)
+ goto destroy_wq;
+
+ hv_set_drvdata(hdev, hbus);
+
+ ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
+ ARRAY_SIZE(pci_protocol_versions));
+ if (ret)
+ goto close;
+
+ ret = hv_allocate_config_window(hbus);
+ if (ret)
+ goto close;
+
+ hbus->cfg_addr = ioremap(hbus->mem_config->start,
+ PCI_CONFIG_MMIO_LENGTH);
+ if (!hbus->cfg_addr) {
+ dev_err(&hdev->device,
+ "Unable to map a virtual address for config space\n");
+ ret = -ENOMEM;
+ goto free_config;
+ }
+
+ name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
+ if (!name) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ hbus->fwnode = irq_domain_alloc_named_fwnode(name);
+ kfree(name);
+ if (!hbus->fwnode) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ ret = hv_pcie_init_irq_domain(hbus);
+ if (ret)
+ goto free_fwnode;
+
+ ret = hv_pci_query_relations(hdev);
+ if (ret)
+ goto free_irq_domain;
+
+ mutex_lock(&hbus->state_lock);
+
+ ret = hv_pci_enter_d0(hdev);
+ if (ret)
+ goto release_state_lock;
+
+ ret = hv_pci_allocate_bridge_windows(hbus);
+ if (ret)
+ goto exit_d0;
+
+ ret = hv_send_resources_allocated(hdev);
+ if (ret)
+ goto free_windows;
+
+ prepopulate_bars(hbus);
+
+ hbus->state = hv_pcibus_probed;
+
+ ret = create_root_hv_pci_bus(hbus);
+ if (ret)
+ goto free_windows;
+
+ mutex_unlock(&hbus->state_lock);
+ return 0;
+
+free_windows:
+ hv_pci_free_bridge_windows(hbus);
+exit_d0:
+ (void) hv_pci_bus_exit(hdev, true);
+release_state_lock:
+ mutex_unlock(&hbus->state_lock);
+free_irq_domain:
+ irq_domain_remove(hbus->irq_domain);
+free_fwnode:
+ irq_domain_free_fwnode(hbus->fwnode);
+unmap:
+ iounmap(hbus->cfg_addr);
+free_config:
+ hv_free_config_window(hbus);
+close:
+ vmbus_close(hdev->channel);
+destroy_wq:
+ destroy_workqueue(hbus->wq);
+free_dom:
+ hv_put_dom_num(hbus->bridge->domain_nr);
+free_bus:
+ kfree(hbus);
+ return ret;
+}
+
+static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct vmbus_channel *chan = hdev->channel;
+ struct {
+ struct pci_packet teardown_packet;
+ u8 buffer[sizeof(struct pci_message)];
+ } pkt;
+ struct hv_pci_compl comp_pkt;
+ struct hv_pci_dev *hpdev, *tmp;
+ unsigned long flags;
+ u64 trans_id;
+ int ret;
+
+ /*
+ * After the host sends the RESCIND_CHANNEL message, it doesn't
+ * access the per-channel ringbuffer any longer.
+ */
+ if (chan->rescind)
+ return 0;
+
+ if (!keep_devs) {
+ struct list_head removed;
+
+ /* Move all present children to a list on the stack */
+ INIT_LIST_HEAD(&removed);
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
+ list_move_tail(&hpdev->list_entry, &removed);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ /* Remove all children in the list */
+ list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
+ list_del(&hpdev->list_entry);
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+ /* For the two refs got in new_pcichild_device() */
+ put_pcichild(hpdev);
+ put_pcichild(hpdev);
+ }
+ }
+
+ ret = hv_send_resources_released(hdev);
+ if (ret) {
+ dev_err(&hdev->device,
+ "Couldn't send resources released packet(s)\n");
+ return ret;
+ }
+
+ memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
+ init_completion(&comp_pkt.host_event);
+ pkt.teardown_packet.completion_func = hv_pci_generic_compl;
+ pkt.teardown_packet.compl_ctxt = &comp_pkt;
+ pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
+
+ ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
+ sizeof(struct pci_message),
+ (unsigned long)&pkt.teardown_packet,
+ &trans_id, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ return ret;
+
+ if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
+ /*
+ * The completion packet on the stack becomes invalid after
+ * 'return'; remove the ID from the VMbus requestor if the
+ * identifier is still mapped to/associated with the packet.
+ *
+ * Cf. hv_pci_onchannelcallback().
+ */
+ vmbus_request_addr_match(chan, trans_id,
+ (unsigned long)&pkt.teardown_packet);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * hv_pci_remove() - Remove routine for this VMBus channel
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ */
+static void hv_pci_remove(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus;
+
+ hbus = hv_get_drvdata(hdev);
+ if (hbus->state == hv_pcibus_installed) {
+ tasklet_disable(&hdev->channel->callback_event);
+ hbus->state = hv_pcibus_removing;
+ tasklet_enable(&hdev->channel->callback_event);
+ destroy_workqueue(hbus->wq);
+ hbus->wq = NULL;
+ /*
+ * At this point, no work is running or can be scheduled
+ * on hbus->wq. We can't race with hv_pci_devices_present()
+ * or hv_pci_eject_device(), so it's safe to proceed.
+ */
+
+ /* Remove the bus from PCI's point of view. */
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(hbus->bridge->bus);
+ hv_pci_remove_slots(hbus);
+ pci_remove_root_bus(hbus->bridge->bus);
+ pci_unlock_rescan_remove();
+ }
+
+ hv_pci_bus_exit(hdev, false);
+
+ vmbus_close(hdev->channel);
+
+ iounmap(hbus->cfg_addr);
+ hv_free_config_window(hbus);
+ hv_pci_free_bridge_windows(hbus);
+ irq_domain_remove(hbus->irq_domain);
+ irq_domain_free_fwnode(hbus->fwnode);
+
+ hv_put_dom_num(hbus->bridge->domain_nr);
+
+ kfree(hbus);
+}
+
+static int hv_pci_suspend(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ enum hv_pcibus_state old_state;
+ int ret;
+
+ /*
+ * hv_pci_suspend() must make sure there are no pending work items
+ * before calling vmbus_close(), since it runs in a process context
+ * as a callback in dpm_suspend(). When it starts to run, the channel
+ * callback hv_pci_onchannelcallback(), which runs in a tasklet
+ * context, can be still running concurrently and scheduling new work
+ * items onto hbus->wq in hv_pci_devices_present() and
+	 * hv_pci_eject_device(), and the work item handlers can access the
+	 * vmbus channel, which hv_pci_suspend() may be closing at the same
+	 * time, e.g. the work item handler pci_devices_present_work() ->
+	 * new_pcichild_device() writes to the vmbus channel.
+ *
+ * To eliminate the race, hv_pci_suspend() disables the channel
+ * callback tasklet, sets hbus->state to hv_pcibus_removing, and
+ * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
+ * it knows that no new work item can be scheduled, and then it flushes
+ * hbus->wq and safely closes the vmbus channel.
+ */
+ tasklet_disable(&hdev->channel->callback_event);
+
+ /* Change the hbus state to prevent new work items. */
+ old_state = hbus->state;
+ if (hbus->state == hv_pcibus_installed)
+ hbus->state = hv_pcibus_removing;
+
+ tasklet_enable(&hdev->channel->callback_event);
+
+ if (old_state != hv_pcibus_installed)
+ return -EINVAL;
+
+ flush_workqueue(hbus->wq);
+
+ ret = hv_pci_bus_exit(hdev, true);
+ if (ret)
+ return ret;
+
+ vmbus_close(hdev->channel);
+
+ return 0;
+}
+
+static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
+{
+ struct irq_data *irq_data;
+ struct msi_desc *entry;
+ int ret = 0;
+
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ return 0;
+
+ msi_lock_descs(&pdev->dev);
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
+ irq_data = irq_get_irq_data(entry->irq);
+ if (WARN_ON_ONCE(!irq_data)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ hv_compose_msi_msg(irq_data, &entry->msg);
+ }
+ msi_unlock_descs(&pdev->dev);
+
+ return ret;
+}
+
+/*
+ * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
+ * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
+ * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
+ * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
+ * Table entries.
+ */
+static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
+{
+ pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
+}
+
+static int hv_pci_resume(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ enum pci_protocol_version_t version[1];
+ int ret;
+
+ hbus->state = hv_pcibus_init;
+
+ hdev->channel->next_request_id_callback = vmbus_next_request_id;
+ hdev->channel->request_addr_callback = vmbus_request_addr;
+ hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
+
+ ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
+ hv_pci_onchannelcallback, hbus);
+ if (ret)
+ return ret;
+
+ /* Only use the version that was in use before hibernation. */
+ version[0] = hbus->protocol_version;
+ ret = hv_pci_protocol_negotiation(hdev, version, 1);
+ if (ret)
+ goto out;
+
+ ret = hv_pci_query_relations(hdev);
+ if (ret)
+ goto out;
+
+ mutex_lock(&hbus->state_lock);
+
+ ret = hv_pci_enter_d0(hdev);
+ if (ret)
+ goto release_state_lock;
+
+ ret = hv_send_resources_allocated(hdev);
+ if (ret)
+ goto release_state_lock;
+
+ prepopulate_bars(hbus);
+
+ hv_pci_restore_msi_state(hbus);
+
+ hbus->state = hv_pcibus_installed;
+ mutex_unlock(&hbus->state_lock);
+ return 0;
+
+release_state_lock:
+ mutex_unlock(&hbus->state_lock);
+out:
+ vmbus_close(hdev->channel);
+ return ret;
+}
+
+static const struct hv_vmbus_device_id hv_pci_id_table[] = {
+ /* PCI Pass-through Class ID */
+ /* 44C4F61D-4444-4400-9D52-802E27EDE19F */
+ { HV_PCIE_GUID, },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
+
+static struct hv_driver hv_pci_drv = {
+ .name = "hv_pci",
+ .id_table = hv_pci_id_table,
+ .probe = hv_pci_probe,
+ .remove = hv_pci_remove,
+ .suspend = hv_pci_suspend,
+ .resume = hv_pci_resume,
+};
+
+static void __exit exit_hv_pci_drv(void)
+{
+ vmbus_driver_unregister(&hv_pci_drv);
+
+ hvpci_block_ops.read_block = NULL;
+ hvpci_block_ops.write_block = NULL;
+ hvpci_block_ops.reg_blk_invalidate = NULL;
+}
+
+static int __init init_hv_pci_drv(void)
+{
+ int ret;
+
+ if (!hv_is_hyperv_initialized())
+ return -ENODEV;
+
+ ret = hv_pci_irqchip_init();
+ if (ret)
+ return ret;
+
+ /* Set the invalid domain number's bit, so it will not be used */
+ set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
+
+ /* Initialize PCI block r/w interface */
+ hvpci_block_ops.read_block = hv_read_config_block;
+ hvpci_block_ops.write_block = hv_write_config_block;
+ hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;
+
+ return vmbus_driver_register(&hv_pci_drv);
+}
+
+module_init(init_hv_pci_drv);
+module_exit(exit_hv_pci_drv);
+
+MODULE_DESCRIPTION("Hyper-V PCI");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-ixp4xx.c b/drivers/pci/controller/pci-ixp4xx.c
new file mode 100644
index 000000000..acb85e0d5
--- /dev/null
+++ b/drivers/pci/controller/pci-ixp4xx.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel IXP4xx PCI host controller
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the IXP4xx arch/arm/mach-ixp4xx/common-pci.c driver
+ * Copyright (C) 2002 Intel Corporation
+ * Copyright (C) 2003 Greg Ungerer <gerg@linux-m68k.org>
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ * Copyright (C) 2005 Deepak Saxena <dsaxena@plexity.net>
+ * Copyright (C) 2005 Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * TODO:
+ * - Test IO-space access
+ * - DMA support
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bits.h>
+#include "../pci.h"
+
+/* Register offsets */
+#define IXP4XX_PCI_NP_AD 0x00
+#define IXP4XX_PCI_NP_CBE 0x04
+#define IXP4XX_PCI_NP_WDATA 0x08
+#define IXP4XX_PCI_NP_RDATA 0x0c
+#define IXP4XX_PCI_CRP_AD_CBE 0x10
+#define IXP4XX_PCI_CRP_WDATA 0x14
+#define IXP4XX_PCI_CRP_RDATA 0x18
+#define IXP4XX_PCI_CSR 0x1c
+#define IXP4XX_PCI_ISR 0x20
+#define IXP4XX_PCI_INTEN 0x24
+#define IXP4XX_PCI_DMACTRL 0x28
+#define IXP4XX_PCI_AHBMEMBASE 0x2c
+#define IXP4XX_PCI_AHBIOBASE 0x30
+#define IXP4XX_PCI_PCIMEMBASE 0x34
+#define IXP4XX_PCI_AHBDOORBELL 0x38
+#define IXP4XX_PCI_PCIDOORBELL 0x3c
+#define IXP4XX_PCI_ATPDMA0_AHBADDR 0x40
+#define IXP4XX_PCI_ATPDMA0_PCIADDR 0x44
+#define IXP4XX_PCI_ATPDMA0_LENADDR 0x48
+#define IXP4XX_PCI_ATPDMA1_AHBADDR 0x4c
+#define IXP4XX_PCI_ATPDMA1_PCIADDR 0x50
+#define IXP4XX_PCI_ATPDMA1_LENADDR 0x54
+
+/* CSR bit definitions */
+#define IXP4XX_PCI_CSR_HOST BIT(0)
+#define IXP4XX_PCI_CSR_ARBEN BIT(1)
+#define IXP4XX_PCI_CSR_ADS BIT(2)
+#define IXP4XX_PCI_CSR_PDS BIT(3)
+#define IXP4XX_PCI_CSR_ABE BIT(4)
+#define IXP4XX_PCI_CSR_DBT BIT(5)
+#define IXP4XX_PCI_CSR_ASE BIT(8)
+#define IXP4XX_PCI_CSR_IC BIT(15)
+#define IXP4XX_PCI_CSR_PRST BIT(16)
+
+/* ISR (Interrupt status) Register bit definitions */
+#define IXP4XX_PCI_ISR_PSE BIT(0)
+#define IXP4XX_PCI_ISR_PFE BIT(1)
+#define IXP4XX_PCI_ISR_PPE BIT(2)
+#define IXP4XX_PCI_ISR_AHBE BIT(3)
+#define IXP4XX_PCI_ISR_APDC BIT(4)
+#define IXP4XX_PCI_ISR_PADC BIT(5)
+#define IXP4XX_PCI_ISR_ADB BIT(6)
+#define IXP4XX_PCI_ISR_PDB BIT(7)
+
+/* INTEN (Interrupt Enable) Register bit definitions */
+#define IXP4XX_PCI_INTEN_PSE BIT(0)
+#define IXP4XX_PCI_INTEN_PFE BIT(1)
+#define IXP4XX_PCI_INTEN_PPE BIT(2)
+#define IXP4XX_PCI_INTEN_AHBE BIT(3)
+#define IXP4XX_PCI_INTEN_APDC BIT(4)
+#define IXP4XX_PCI_INTEN_PADC BIT(5)
+#define IXP4XX_PCI_INTEN_ADB BIT(6)
+#define IXP4XX_PCI_INTEN_PDB BIT(7)
+
+/* Shift value for byte enable on NP cmd/byte enable register */
+#define IXP4XX_PCI_NP_CBE_BESL 4
+
+/* PCI commands supported by NP access unit */
+#define NP_CMD_IOREAD 0x2
+#define NP_CMD_IOWRITE 0x3
+#define NP_CMD_CONFIGREAD 0xa
+#define NP_CMD_CONFIGWRITE 0xb
+#define NP_CMD_MEMREAD 0x6
+#define NP_CMD_MEMWRITE 0x7
+
+/* Constants for CRP access into local config space */
+#define CRP_AD_CBE_BESL 20
+#define CRP_AD_CBE_WRITE 0x00010000
+
+/* Special PCI configuration space registers for this controller */
+#define IXP4XX_PCI_RTOTTO 0x40
+
+struct ixp4xx_pci {
+ struct device *dev;
+ void __iomem *base;
+ bool errata_hammer;
+ bool host_mode;
+};
+
+/*
+ * The IXP4xx has a peculiar address bus that will change the
+ * byte order on SoC peripherals depending on whether the device
+ * operates in big-endian or little-endian mode. That means that
+ * readl() and writel() that always use little-endian access
+ * will not work for SoC peripherals such as the PCI controller
+ * when used in big-endian mode. The accesses to the individual
+ * PCI devices on the other hand, are always little-endian and
+ * can use readl() and writel().
+ *
+ * For local AHB bus access we need to use __raw_[readl|writel]()
+ * to make sure that we access the SoC devices in the CPU native
+ * endianness.
+ */
+static inline u32 ixp4xx_readl(struct ixp4xx_pci *p, u32 reg)
+{
+ return __raw_readl(p->base + reg);
+}
+
+static inline void ixp4xx_writel(struct ixp4xx_pci *p, u32 reg, u32 val)
+{
+ __raw_writel(val, p->base + reg);
+}
+
+static int ixp4xx_pci_check_master_abort(struct ixp4xx_pci *p)
+{
+ u32 isr = ixp4xx_readl(p, IXP4XX_PCI_ISR);
+
+ if (isr & IXP4XX_PCI_ISR_PFE) {
+ /* Make sure the master abort bit is reset */
+ ixp4xx_writel(p, IXP4XX_PCI_ISR, IXP4XX_PCI_ISR_PFE);
+ dev_dbg(p->dev, "master abort detected\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ixp4xx_pci_read_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
+{
+ ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
+
+ if (p->errata_hammer) {
+ int i;
+
+ /*
+		 * PCI errata workaround - only safe if NP PCI space reads
+		 * have no side effects. Hammer the register: issue the
+		 * command and read the data back twice, eight times over;
+		 * the last read will be good.
+ */
+ for (i = 0; i < 8; i++) {
+ ixp4xx_writel(p, IXP4XX_PCI_NP_CBE, cmd);
+ *data = ixp4xx_readl(p, IXP4XX_PCI_NP_RDATA);
+ *data = ixp4xx_readl(p, IXP4XX_PCI_NP_RDATA);
+ }
+ } else {
+ ixp4xx_writel(p, IXP4XX_PCI_NP_CBE, cmd);
+ *data = ixp4xx_readl(p, IXP4XX_PCI_NP_RDATA);
+ }
+
+ return ixp4xx_pci_check_master_abort(p);
+}
+
+static int ixp4xx_pci_write_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data)
+{
+ ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
+
+ /* Set up the write */
+ ixp4xx_writel(p, IXP4XX_PCI_NP_CBE, cmd);
+
+ /* Execute the write by writing to NP_WDATA */
+ ixp4xx_writel(p, IXP4XX_PCI_NP_WDATA, data);
+
+ return ixp4xx_pci_check_master_abort(p);
+}
+
+static u32 ixp4xx_config_addr(u8 bus_num, u16 devfn, int where)
+{
+ /* Root bus is always 0 in this hardware */
+ if (bus_num == 0) {
+ /* type 0 */
+ return (PCI_CONF1_ADDRESS(0, 0, PCI_FUNC(devfn), where) &
+ ~PCI_CONF1_ENABLE) | BIT(32-PCI_SLOT(devfn));
+ } else {
+ /* type 1 */
+ return (PCI_CONF1_ADDRESS(bus_num, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where) &
+ ~PCI_CONF1_ENABLE) | 1;
+ }
+}
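+
+/*
+ * For illustration (values assumed, not taken from real hardware): a
+ * type 0 cycle to slot 2, function 0, register 0x04 on the root bus
+ * yields (PCI_CONF1_ADDRESS(0, 0, 0, 0x04) & ~PCI_CONF1_ENABLE) |
+ * BIT(30) = 0x40000004, i.e. each slot gets a dedicated high AD line
+ * asserted as its IDSEL.
+ */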
+
+/*
+ * CRP functions are "Controller Configuration Port" accesses
+ * initiated from within this driver itself to read/write PCI
+ * control information in the config space.
+ */
+static u32 ixp4xx_crp_byte_lane_enable_bits(u32 n, int size)
+{
+ if (size == 1)
+ return (0xf & ~BIT(n)) << CRP_AD_CBE_BESL;
+ if (size == 2)
+ return (0xf & ~(BIT(n) | BIT(n+1))) << CRP_AD_CBE_BESL;
+ if (size == 4)
+ return 0;
+ return 0xffffffff;
+}
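+
+/*
+ * For illustration: a one-byte access at offset n = 1 gives
+ * (0xf & ~BIT(1)) << 20 = 0x00d00000. The byte enables are active-low
+ * (a set bit disables that lane), so only byte lane 1 is selected;
+ * size 4 returns 0 and enables all four lanes.
+ */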
+
+static int ixp4xx_crp_read_config(struct ixp4xx_pci *p, int where, int size,
+ u32 *value)
+{
+ u32 n, cmd, val;
+
+ n = where % 4;
+ cmd = where & ~3;
+
+ dev_dbg(p->dev, "%s from %d size %d cmd %08x\n",
+ __func__, where, size, cmd);
+
+ ixp4xx_writel(p, IXP4XX_PCI_CRP_AD_CBE, cmd);
+ val = ixp4xx_readl(p, IXP4XX_PCI_CRP_RDATA);
+
+ val >>= (8*n);
+ switch (size) {
+ case 1:
+ val &= U8_MAX;
+ dev_dbg(p->dev, "%s read byte %02x\n", __func__, val);
+ break;
+ case 2:
+ val &= U16_MAX;
+ dev_dbg(p->dev, "%s read word %04x\n", __func__, val);
+ break;
+ case 4:
+ val &= U32_MAX;
+ dev_dbg(p->dev, "%s read long %08x\n", __func__, val);
+ break;
+ default:
+ /* Should not happen */
+ dev_err(p->dev, "%s illegal size\n", __func__);
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ *value = val;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int ixp4xx_crp_write_config(struct ixp4xx_pci *p, int where, int size,
+ u32 value)
+{
+ u32 n, cmd, val;
+
+ n = where % 4;
+ cmd = ixp4xx_crp_byte_lane_enable_bits(n, size);
+ if (cmd == 0xffffffff)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ cmd |= where & ~3;
+ cmd |= CRP_AD_CBE_WRITE;
+
+ val = value << (8*n);
+
+ dev_dbg(p->dev, "%s to %d size %d cmd %08x val %08x\n",
+ __func__, where, size, cmd, val);
+
+ ixp4xx_writel(p, IXP4XX_PCI_CRP_AD_CBE, cmd);
+ ixp4xx_writel(p, IXP4XX_PCI_CRP_WDATA, val);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Then follows the functions that read and write from the common PCI
+ * configuration space.
+ */
+static u32 ixp4xx_byte_lane_enable_bits(u32 n, int size)
+{
+ if (size == 1)
+ return (0xf & ~BIT(n)) << 4;
+ if (size == 2)
+ return (0xf & ~(BIT(n) | BIT(n+1))) << 4;
+ if (size == 4)
+ return 0;
+ return 0xffffffff;
+}
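+
+/*
+ * For illustration: a 16-bit access at offset n = 2 gives
+ * (0xf & ~(BIT(2) | BIT(3))) << 4 = 0x30; with the active-low
+ * convention this selects byte lanes 2 and 3 only.
+ */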
+
+static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct ixp4xx_pci *p = bus->sysdata;
+ u32 n, addr, val, cmd;
+ u8 bus_num = bus->number;
+ int ret;
+
+ *value = 0xffffffff;
+ n = where % 4;
+ cmd = ixp4xx_byte_lane_enable_bits(n, size);
+ if (cmd == 0xffffffff)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ addr = ixp4xx_config_addr(bus_num, devfn, where);
+ cmd |= NP_CMD_CONFIGREAD;
+ dev_dbg(p->dev, "read_config from %d size %d dev %d:%d:%d address: %08x cmd: %08x\n",
+ where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
+
+ ret = ixp4xx_pci_read_indirect(p, addr, cmd, &val);
+ if (ret)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ val >>= (8*n);
+ switch (size) {
+ case 1:
+ val &= U8_MAX;
+ dev_dbg(p->dev, "%s read byte %02x\n", __func__, val);
+ break;
+ case 2:
+ val &= U16_MAX;
+ dev_dbg(p->dev, "%s read word %04x\n", __func__, val);
+ break;
+ case 4:
+ val &= U32_MAX;
+ dev_dbg(p->dev, "%s read long %08x\n", __func__, val);
+ break;
+ default:
+ /* Should not happen */
+ dev_err(p->dev, "%s illegal size\n", __func__);
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ *value = val;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int ixp4xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct ixp4xx_pci *p = bus->sysdata;
+ u32 n, addr, val, cmd;
+ u8 bus_num = bus->number;
+ int ret;
+
+ n = where % 4;
+ cmd = ixp4xx_byte_lane_enable_bits(n, size);
+ if (cmd == 0xffffffff)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ addr = ixp4xx_config_addr(bus_num, devfn, where);
+ cmd |= NP_CMD_CONFIGWRITE;
+ val = value << (8*n);
+
+ dev_dbg(p->dev, "write_config_byte %#x to %d size %d dev %d:%d:%d addr: %08x cmd %08x\n",
+ value, where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
+
+ ret = ixp4xx_pci_write_indirect(p, addr, cmd, val);
+ if (ret)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops ixp4xx_pci_ops = {
+ .read = ixp4xx_pci_read_config,
+ .write = ixp4xx_pci_write_config,
+};
+
+static u32 ixp4xx_pci_addr_to_64mconf(phys_addr_t addr)
+{
+ u8 base;
+
+ base = ((addr & 0xff000000) >> 24);
+ return (base << 24) | ((base + 1) << 16)
+ | ((base + 2) << 8) | (base + 3);
+}
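+
+/*
+ * For illustration: addr = 0x48000000 gives base = 0x48, so the
+ * returned value is 0x48494a4b - four bytes programming four
+ * consecutive 16 MB windows that together cover the 64 MB range.
+ */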
+
+static int ixp4xx_pci_parse_map_ranges(struct ixp4xx_pci *p)
+{
+ struct device *dev = p->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p);
+ struct resource_entry *win;
+ struct resource *res;
+ phys_addr_t addr;
+
+ win = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
+ if (win) {
+ u32 pcimembase;
+
+ res = win->res;
+ addr = res->start - win->offset;
+
+ if (res->flags & IORESOURCE_PREFETCH)
+ res->name = "IXP4xx PCI PRE-MEM";
+ else
+ res->name = "IXP4xx PCI NON-PRE-MEM";
+
+ dev_dbg(dev, "%s window %pR, bus addr %pa\n",
+ res->name, res, &addr);
+ if (resource_size(res) != SZ_64M) {
+ dev_err(dev, "memory range is not 64MB\n");
+ return -EINVAL;
+ }
+
+ pcimembase = ixp4xx_pci_addr_to_64mconf(addr);
+ /* Commit configuration */
+ ixp4xx_writel(p, IXP4XX_PCI_PCIMEMBASE, pcimembase);
+ } else {
+ dev_err(dev, "no AHB memory mapping defined\n");
+ }
+
+ win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ if (win) {
+ res = win->res;
+
+ addr = pci_pio_to_address(res->start);
+ if (addr & 0xff) {
+ dev_err(dev, "IO mem at uneven address: %pa\n", &addr);
+ return -EINVAL;
+ }
+
+ res->name = "IXP4xx PCI IO MEM";
+ /*
+		 * Set up the I/O space location for PCI->AHB access: the
+		 * upper 24 bits of the address go into the lower 24 bits
+		 * of this register.
+ */
+ ixp4xx_writel(p, IXP4XX_PCI_AHBIOBASE, (addr >> 8));
+ } else {
+ dev_info(dev, "no IO space AHB memory mapping defined\n");
+ }
+
+ return 0;
+}
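+
+/*
+ * For illustration (address assumed): an I/O window whose CPU-side
+ * address is 0xc0000000 would program IXP4XX_PCI_AHBIOBASE above with
+ * 0xc0000000 >> 8 = 0x00c00000.
+ */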
+
+static int ixp4xx_pci_parse_map_dma_ranges(struct ixp4xx_pci *p)
+{
+ struct device *dev = p->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p);
+ struct resource_entry *win;
+ struct resource *res;
+ phys_addr_t addr;
+ u32 ahbmembase;
+
+ win = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM);
+ if (win) {
+ res = win->res;
+ addr = res->start - win->offset;
+
+ if (resource_size(res) != SZ_64M) {
+ dev_err(dev, "DMA memory range is not 64MB\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "DMA MEM BASE: %pa\n", &addr);
+ /*
+ * 4 PCI-to-AHB windows of 16 MB each, write the 8 high bits
+ * into each byte of the PCI_AHBMEMBASE register.
+ */
+ ahbmembase = ixp4xx_pci_addr_to_64mconf(addr);
+ /* Commit AHB membase */
+ ixp4xx_writel(p, IXP4XX_PCI_AHBMEMBASE, ahbmembase);
+ } else {
+ dev_err(dev, "no DMA memory range defined\n");
+ }
+
+ return 0;
+}
+
+/* Only used to get context for abort handling */
+static struct ixp4xx_pci *ixp4xx_pci_abort_singleton;
+
+static int ixp4xx_pci_abort_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ struct ixp4xx_pci *p = ixp4xx_pci_abort_singleton;
+ u32 isr, status;
+ int ret;
+
+ isr = ixp4xx_readl(p, IXP4XX_PCI_ISR);
+ ret = ixp4xx_crp_read_config(p, PCI_STATUS, 2, &status);
+ if (ret) {
+ dev_err(p->dev, "unable to read abort status\n");
+ return -EINVAL;
+ }
+
+ dev_err(p->dev,
+ "PCI: abort_handler addr = %#lx, isr = %#x, status = %#x\n",
+ addr, isr, status);
+
+ /* Make sure the Master Abort bit is reset */
+ ixp4xx_writel(p, IXP4XX_PCI_ISR, IXP4XX_PCI_ISR_PFE);
+ status |= PCI_STATUS_REC_MASTER_ABORT;
+ ret = ixp4xx_crp_write_config(p, PCI_STATUS, 2, status);
+ if (ret)
+ dev_err(p->dev, "unable to clear abort status bit\n");
+
+ /*
+ * If it was an imprecise abort, then we need to correct the
+ * return address to be _after_ the instruction.
+ */
+ if (fsr & (1 << 10)) {
+ dev_err(p->dev, "imprecise abort\n");
+ regs->ARM_pc += 4;
+ }
+
+ return 0;
+}
+
+static int __init ixp4xx_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct ixp4xx_pci *p;
+ struct pci_host_bridge *host;
+ int ret;
+ u32 val;
+ phys_addr_t addr;
+ u32 basereg[4] = {
+ PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_1,
+ PCI_BASE_ADDRESS_2,
+ PCI_BASE_ADDRESS_3,
+ };
+ int i;
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
+ if (!host)
+ return -ENOMEM;
+
+ host->ops = &ixp4xx_pci_ops;
+ p = pci_host_bridge_priv(host);
+ host->sysdata = p;
+ p->dev = dev;
+ dev_set_drvdata(dev, p);
+
+ /*
+ * Set up quirk for erratic behaviour in the 42x variant
+ * when accessing config space.
+ */
+ if (of_device_is_compatible(np, "intel,ixp42x-pci")) {
+ p->errata_hammer = true;
+ dev_info(dev, "activate hammering errata\n");
+ }
+
+ p->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(p->base))
+ return PTR_ERR(p->base);
+
+ val = ixp4xx_readl(p, IXP4XX_PCI_CSR);
+ p->host_mode = !!(val & IXP4XX_PCI_CSR_HOST);
+ dev_info(dev, "controller is in %s mode\n",
+ p->host_mode ? "host" : "option");
+
+ /* Hook in our fault handler for PCI errors */
+ ixp4xx_pci_abort_singleton = p;
+ hook_fault_code(16+6, ixp4xx_pci_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ ret = ixp4xx_pci_parse_map_ranges(p);
+ if (ret)
+ return ret;
+
+ ret = ixp4xx_pci_parse_map_dma_ranges(p);
+ if (ret)
+ return ret;
+
+ /* This is only configured in host mode */
+ if (p->host_mode) {
+ addr = __pa(PAGE_OFFSET);
+		/* This is a no-op (0x00) but documents what is going on */
+ addr |= PCI_BASE_ADDRESS_SPACE_MEMORY;
+
+ for (i = 0; i < 4; i++) {
+ /* Write this directly into the config space */
+ ret = ixp4xx_crp_write_config(p, basereg[i], 4, addr);
+ if (ret)
+ dev_err(dev, "failed to set up PCI_BASE_ADDRESS_%d\n", i);
+ else
+ dev_info(dev, "set PCI_BASE_ADDR_%d to %pa\n", i, &addr);
+ addr += SZ_16M;
+ }
+
+ /*
+ * Enable CSR window at 64 MiB to allow PCI masters to continue
+ * prefetching past the 64 MiB boundary, if all AHB to PCI
+ * windows are consecutive.
+ */
+ ret = ixp4xx_crp_write_config(p, PCI_BASE_ADDRESS_4, 4, addr);
+ if (ret)
+ dev_err(dev, "failed to set up PCI_BASE_ADDRESS_4\n");
+ else
+ dev_info(dev, "set PCI_BASE_ADDR_4 to %pa\n", &addr);
+
+ /*
+ * Put the IO memory window at the very end of physical memory
+		 * at 0xfffffc00. This window is used when the system accesses
+		 * PCI IO memory over AHB.
+ */
+ addr = 0xfffffc00;
+ addr |= PCI_BASE_ADDRESS_SPACE_IO;
+ ret = ixp4xx_crp_write_config(p, PCI_BASE_ADDRESS_5, 4, addr);
+ if (ret)
+ dev_err(dev, "failed to set up PCI_BASE_ADDRESS_5\n");
+ else
+ dev_info(dev, "set PCI_BASE_ADDR_5 to %pa\n", &addr);
+
+ /*
+ * Retry timeout to 0x80
+ * Transfer ready timeout to 0xff
+ */
+ ret = ixp4xx_crp_write_config(p, IXP4XX_PCI_RTOTTO, 4,
+ 0x000080ff);
+ if (ret)
+ dev_err(dev, "failed to set up TRDY limit\n");
+ else
+ dev_info(dev, "set TRDY limit to 0x80ff\n");
+ }
+
+ /* Clear interrupts */
+ val = IXP4XX_PCI_ISR_PSE | IXP4XX_PCI_ISR_PFE | IXP4XX_PCI_ISR_PPE | IXP4XX_PCI_ISR_AHBE;
+ ixp4xx_writel(p, IXP4XX_PCI_ISR, val);
+
+ /*
+ * Set Initialize Complete in PCI Control Register: allow IXP4XX to
+ * generate PCI configuration cycles. Specify that the AHB bus is
+ * operating in big-endian mode. Set up byte lane swapping between
+ * little-endian PCI and the big-endian AHB bus.
+ */
+ val = IXP4XX_PCI_CSR_IC | IXP4XX_PCI_CSR_ABE;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ val |= (IXP4XX_PCI_CSR_PDS | IXP4XX_PCI_CSR_ADS);
+ ixp4xx_writel(p, IXP4XX_PCI_CSR, val);
+
+ ret = ixp4xx_crp_write_config(p, PCI_COMMAND, 2, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
+ if (ret)
+ dev_err(dev, "unable to initialize master and command memory\n");
+ else
+ dev_info(dev, "initialized as master\n");
+
+ pci_host_probe(host);
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_pci_of_match[] = {
+ {
+ .compatible = "intel,ixp42x-pci",
+ },
+ {
+ .compatible = "intel,ixp43x-pci",
+ },
+ {},
+};
+
+/*
+ * This driver needs to be a builtin module with suppressed bind
+ * attributes since probe() installs a hard exception handler, which
+ * can only be done from __init-tagged code sections. The module can
+ * therefore never be removed or re-inserted.
+ */
+static struct platform_driver ixp4xx_pci_driver = {
+ .driver = {
+ .name = "ixp4xx-pci",
+ .suppress_bind_attrs = true,
+ .of_match_table = ixp4xx_pci_of_match,
+ },
+};
+builtin_platform_driver_probe(ixp4xx_pci_driver, ixp4xx_pci_probe);
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
new file mode 100644
index 000000000..8b34ccff0
--- /dev/null
+++ b/drivers/pci/controller/pci-loongson.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Loongson PCI Host Controller Driver
+ *
+ * Copyright (C) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include "../pci.h"
+
+/* Device IDs */
+#define DEV_LS2K_PCIE_PORT0 0x1a05
+#define DEV_LS7A_PCIE_PORT0 0x7a09
+#define DEV_LS7A_PCIE_PORT1 0x7a19
+#define DEV_LS7A_PCIE_PORT2 0x7a29
+#define DEV_LS7A_PCIE_PORT3 0x7a39
+#define DEV_LS7A_PCIE_PORT4 0x7a49
+#define DEV_LS7A_PCIE_PORT5 0x7a59
+#define DEV_LS7A_PCIE_PORT6 0x7a69
+
+#define DEV_LS2K_APB 0x7a02
+#define DEV_LS7A_GMAC 0x7a03
+#define DEV_LS7A_DC1 0x7a06
+#define DEV_LS7A_LPC 0x7a0c
+#define DEV_LS7A_AHCI 0x7a08
+#define DEV_LS7A_CONF 0x7a10
+#define DEV_LS7A_GNET 0x7a13
+#define DEV_LS7A_EHCI 0x7a14
+#define DEV_LS7A_DC2 0x7a36
+#define DEV_LS7A_HDMI 0x7a37
+
+#define FLAG_CFG0 BIT(0)
+#define FLAG_CFG1 BIT(1)
+#define FLAG_DEV_FIX BIT(2)
+#define FLAG_DEV_HIDDEN BIT(3)
+
+struct loongson_pci_data {
+ u32 flags;
+ struct pci_ops *ops;
+};
+
+struct loongson_pci {
+ void __iomem *cfg0_base;
+ void __iomem *cfg1_base;
+ struct platform_device *pdev;
+ const struct loongson_pci_data *data;
+};
+
+/* Fixup wrong class code in PCIe bridges */
+static void bridge_class_quirk(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT0, bridge_class_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT1, bridge_class_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT2, bridge_class_quirk);
+
+static void system_bus_quirk(struct pci_dev *pdev)
+{
+ /*
+ * The address space consumed by these devices is outside the
+ * resources of the host bridge.
+ */
+ pdev->mmio_always_on = 1;
+ pdev->non_compliant_bars = 1;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS2K_APB, system_bus_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_CONF, system_bus_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_LPC, system_bus_quirk);
+
+/*
+ * Some Loongson PCIe ports have hardware limitations on their Maximum Read
+ * Request Size and can't handle anything larger. Sane firmware will set
+ * a proper MRRS at boot, so we only need no_inc_mrrs for bridges. However,
+ * some MIPS Loongson firmware doesn't set MRRS properly, so we have to
+ * enforce the maximum safe MRRS, which is 256 bytes.
+ */
+#ifdef CONFIG_MIPS
+static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev)
+{
+ struct pci_bus *bus = pdev->bus;
+ struct pci_dev *bridge;
+ static const struct pci_device_id bridge_devids[] = {
+ { PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) },
+ { 0, },
+ };
+
+ /* look for the matching bridge */
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ bus = bus->parent;
+
+ if (pci_match_id(bridge_devids, bridge)) {
+ if (pcie_get_readrq(pdev) > 256) {
+ pci_info(pdev, "limiting MRRS to 256\n");
+ pcie_set_readrq(pdev, 256);
+ }
+ break;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk);
+#endif
+
+static void loongson_mrrs_quirk(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
+
+ bridge->no_inc_mrrs = 1;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS2K_PCIE_PORT0, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT0, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT1, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT2, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT3, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT4, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT5, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT6, loongson_mrrs_quirk);
+
+static void loongson_pci_pin_quirk(struct pci_dev *pdev)
+{
+ pdev->pin = 1 + (PCI_FUNC(pdev->devfn) & 3);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_DC1, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_DC2, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_GMAC, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_AHCI, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_EHCI, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_GNET, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_HDMI, loongson_pci_pin_quirk);
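+
+/*
+ * For illustration: with the quirk above, function 0 of a fixed-up
+ * device reports INTA (pin 1), function 1 INTB, and so on, wrapping
+ * after every four functions.
+ */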
+
+static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg;
+
+ if (acpi_disabled)
+ return (struct loongson_pci *)(bus->sysdata);
+
+ cfg = bus->sysdata;
+ return (struct loongson_pci *)(cfg->priv);
+}
+
+static void __iomem *cfg0_map(struct loongson_pci *priv, struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ unsigned long addroff = 0x0;
+ unsigned char busnum = bus->number;
+
+ if (!pci_is_root_bus(bus)) {
+ addroff |= BIT(24); /* Type 1 Access */
+ addroff |= (busnum << 16);
+ }
+ addroff |= (devfn << 8) | where;
+ return priv->cfg0_base + addroff;
+}
+
+static void __iomem *cfg1_map(struct loongson_pci *priv, struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ unsigned long addroff = 0x0;
+ unsigned char busnum = bus->number;
+
+ if (!pci_is_root_bus(bus)) {
+ addroff |= BIT(28); /* Type 1 Access */
+ addroff |= (busnum << 16);
+ }
+ addroff |= (devfn << 8) | (where & 0xff) | ((where & 0xf00) << 16);
+ return priv->cfg1_base + addroff;
+}
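+
+/*
+ * For illustration (values assumed): on the root bus, devfn 0x19
+ * (device 3, function 1) at extended config offset 0x104 maps to
+ * addroff = (0x19 << 8) | 0x04 | (0x100 << 16) = 0x01001904.
+ */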
+
+static bool pdev_may_exist(struct pci_bus *bus, unsigned int device,
+ unsigned int function)
+{
+ return !(pci_is_root_bus(bus) &&
+ (device >= 9 && device <= 20) && (function > 0));
+}
+
+static void __iomem *pci_loongson_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ unsigned int device = PCI_SLOT(devfn);
+ unsigned int function = PCI_FUNC(devfn);
+ struct loongson_pci *priv = pci_bus_to_loongson_pci(bus);
+
+ /*
+	 * Do not read from more than one device on buses other than
+	 * the root bus: only device 0 is accessible there.
+ */
+ if ((priv->data->flags & FLAG_DEV_FIX) && bus->self) {
+ if (!pci_is_root_bus(bus) && (device > 0))
+ return NULL;
+ }
+
+ /* Don't access non-existent devices */
+ if (priv->data->flags & FLAG_DEV_HIDDEN) {
+ if (!pdev_may_exist(bus, device, function))
+ return NULL;
+ }
+
+ /* CFG0 can only access standard space */
+ if (where < PCI_CFG_SPACE_SIZE && priv->cfg0_base)
+ return cfg0_map(priv, bus, devfn, where);
+
+ /* CFG1 can access extended space */
+ if (where < PCI_CFG_SPACE_EXP_SIZE && priv->cfg1_base)
+ return cfg1_map(priv, bus, devfn, where);
+
+ return NULL;
+}
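+
+/*
+ * For illustration: an access at offset 0x40 goes through cfg0_map()
+ * whenever a CFG0 window exists (and falls back to cfg1_map()
+ * otherwise), while an access at extended offset 0x104 can only be
+ * served by cfg1_map().
+ */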
+
+#ifdef CONFIG_OF
+
+static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int irq;
+ u8 val;
+
+ irq = of_irq_parse_and_map_pci(dev, slot, pin);
+ if (irq > 0)
+ return irq;
+
+	/* Handle i8259-based legacy systems */
+	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &val);
+	/* The i8259 provides only 15 IRQs */
+ if (val > 15)
+ return 0;
+
+ return val;
+}
+
+/* LS2K/LS7A accept 8/16/32-bit PCI config operations */
+static struct pci_ops loongson_pci_ops = {
+ .map_bus = pci_loongson_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/* RS780/SR5690 only accept 32-bit PCI config operations */
+static struct pci_ops loongson_pci_ops32 = {
+ .map_bus = pci_loongson_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static const struct loongson_pci_data ls2k_pci_data = {
+ .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN,
+ .ops = &loongson_pci_ops,
+};
+
+static const struct loongson_pci_data ls7a_pci_data = {
+ .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN,
+ .ops = &loongson_pci_ops,
+};
+
+static const struct loongson_pci_data rs780e_pci_data = {
+ .flags = FLAG_CFG0,
+ .ops = &loongson_pci_ops32,
+};
+
+static const struct of_device_id loongson_pci_of_match[] = {
+ { .compatible = "loongson,ls2k-pci",
+ .data = &ls2k_pci_data, },
+ { .compatible = "loongson,ls7a-pci",
+ .data = &ls7a_pci_data, },
+ { .compatible = "loongson,rs780e-pci",
+ .data = &rs780e_pci_data, },
+ {}
+};
+
+static int loongson_pci_probe(struct platform_device *pdev)
+{
+ struct loongson_pci *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct pci_host_bridge *bridge;
+ struct resource *regs;
+
+ if (!node)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*priv));
+ if (!bridge)
+ return -ENODEV;
+
+ priv = pci_host_bridge_priv(bridge);
+ priv->pdev = pdev;
+ priv->data = of_device_get_match_data(dev);
+
+ if (priv->data->flags & FLAG_CFG0) {
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ dev_err(dev, "missing mem resources for cfg0\n");
+ else {
+ priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(priv->cfg0_base))
+ return PTR_ERR(priv->cfg0_base);
+ }
+ }
+
+ if (priv->data->flags & FLAG_CFG1) {
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!regs)
+ dev_info(dev, "missing mem resource for cfg1\n");
+ else {
+ priv->cfg1_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(priv->cfg1_base))
+ priv->cfg1_base = NULL;
+ }
+ }
+
+ bridge->sysdata = priv;
+ bridge->ops = priv->data->ops;
+ bridge->map_irq = loongson_map_irq;
+
+ return pci_host_probe(bridge);
+}
+
+static struct platform_driver loongson_pci_driver = {
+ .driver = {
+ .name = "loongson-pci",
+ .of_match_table = loongson_pci_of_match,
+ },
+ .probe = loongson_pci_probe,
+};
+builtin_platform_driver(loongson_pci_driver);
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+static int loongson_pci_ecam_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct loongson_pci *priv;
+ struct loongson_pci_data *data;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ cfg->priv = priv;
+ data->flags = FLAG_CFG1 | FLAG_DEV_HIDDEN;
+ priv->data = data;
+ priv->cfg1_base = cfg->win - (cfg->busr.start << 16);
+
+ return 0;
+}
+
+const struct pci_ecam_ops loongson_pci_ecam_ops = {
+ .bus_shift = 16,
+ .init = loongson_pci_ecam_init,
+ .pci_ops = {
+ .map_bus = pci_loongson_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+#endif
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
new file mode 100644
index 000000000..29fe09c99
--- /dev/null
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -0,0 +1,1737 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe driver for Marvell Armada 370 and Armada XP SoCs
+ *
+ * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mbus.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+
+#include "../pci.h"
+#include "../pci-bridge-emul.h"
+
+/*
+ * PCIe unit register offsets.
+ */
+#define PCIE_DEV_ID_OFF 0x0000
+#define PCIE_CMD_OFF 0x0004
+#define PCIE_DEV_REV_OFF 0x0008
+#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
+#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
+#define PCIE_SSDEV_ID_OFF 0x002c
+#define PCIE_CAP_PCIEXP 0x0060
+#define PCIE_CAP_PCIERR_OFF 0x0100
+#define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
+#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
+#define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
+#define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
+#define PCIE_WIN5_CTRL_OFF 0x1880
+#define PCIE_WIN5_BASE_OFF 0x1884
+#define PCIE_WIN5_REMAP_OFF 0x188c
+#define PCIE_CONF_ADDR_OFF 0x18f8
+#define PCIE_CONF_ADDR_EN 0x80000000
+#define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
+#define PCIE_CONF_ADDR(bus, devfn, where) \
+ (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
+ PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
+ PCIE_CONF_ADDR_EN)
+#define PCIE_CONF_DATA_OFF 0x18fc
+#define PCIE_INT_CAUSE_OFF 0x1900
+#define PCIE_INT_UNMASK_OFF 0x1910
+#define PCIE_INT_INTX(i) BIT(24+i)
+#define PCIE_INT_PM_PME BIT(28)
+#define PCIE_INT_ALL_MASK GENMASK(31, 0)
+#define PCIE_CTRL_OFF 0x1a00
+#define PCIE_CTRL_X1_MODE 0x0001
+#define PCIE_CTRL_RC_MODE BIT(1)
+#define PCIE_CTRL_MASTER_HOT_RESET BIT(24)
+#define PCIE_STAT_OFF 0x1a04
+#define PCIE_STAT_BUS 0xff00
+#define PCIE_STAT_DEV 0x1f0000
+#define PCIE_STAT_LINK_DOWN BIT(0)
+#define PCIE_SSPL_OFF 0x1a0c
+#define PCIE_SSPL_VALUE_SHIFT 0
+#define PCIE_SSPL_VALUE_MASK GENMASK(7, 0)
+#define PCIE_SSPL_SCALE_SHIFT 8
+#define PCIE_SSPL_SCALE_MASK GENMASK(9, 8)
+#define PCIE_SSPL_ENABLE BIT(16)
+#define PCIE_RC_RTSTA 0x1a14
+#define PCIE_DEBUG_CTRL 0x1a60
+#define PCIE_DEBUG_SOFT_RESET BIT(20)
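+
+/*
+ * For illustration (values assumed): PCIE_CONF_ADDR(1, PCI_DEVFN(2, 0),
+ * 0x110) = PCIE_CONF_ADDR_EN | (0x100 << 16) | (1 << 16) | (2 << 11) |
+ * 0x10 = 0x81011010 - the extended register bits land in the high part
+ * of the address register.
+ */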
+
+struct mvebu_pcie_port;
+
+/* Structure representing all PCIe interfaces */
+struct mvebu_pcie {
+ struct platform_device *pdev;
+ struct mvebu_pcie_port *ports;
+ struct resource io;
+ struct resource realio;
+ struct resource mem;
+ int nports;
+};
+
+struct mvebu_pcie_window {
+ phys_addr_t base;
+ phys_addr_t remap;
+ size_t size;
+};
+
+/* Structure representing one PCIe interface */
+struct mvebu_pcie_port {
+ char *name;
+ void __iomem *base;
+ u32 port;
+ u32 lane;
+ bool is_x4;
+ int devfn;
+ unsigned int mem_target;
+ unsigned int mem_attr;
+ unsigned int io_target;
+ unsigned int io_attr;
+ struct clk *clk;
+ struct gpio_desc *reset_gpio;
+ char *reset_name;
+ struct pci_bridge_emul bridge;
+ struct device_node *dn;
+ struct mvebu_pcie *pcie;
+ struct mvebu_pcie_window memwin;
+ struct mvebu_pcie_window iowin;
+ u32 saved_pcie_stat;
+ struct resource regs;
+ u8 slot_power_limit_value;
+ u8 slot_power_limit_scale;
+ struct irq_domain *intx_irq_domain;
+ raw_spinlock_t irq_lock;
+ int intx_irq;
+};
+
+static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
+{
+ writel(val, port->base + reg);
+}
+
+static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
+{
+ return readl(port->base + reg);
+}
+
+static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
+{
+ return port->io_target != -1 && port->io_attr != -1;
+}
+
+static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
+{
+ return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
+}
+
+static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
+{
+ return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
+}
+
+static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
+{
+ u32 stat;
+
+ stat = mvebu_readl(port, PCIE_STAT_OFF);
+ stat &= ~PCIE_STAT_BUS;
+ stat |= nr << 8;
+ mvebu_writel(port, stat, PCIE_STAT_OFF);
+}
+
+static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
+{
+ u32 stat;
+
+ stat = mvebu_readl(port, PCIE_STAT_OFF);
+ stat &= ~PCIE_STAT_DEV;
+ stat |= nr << 16;
+ mvebu_writel(port, stat, PCIE_STAT_OFF);
+}
+
+static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
+{
+ int i;
+
+ mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
+
+ for (i = 1; i < 3; i++) {
+ mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
+ }
+
+ for (i = 0; i < 5; i++) {
+ mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
+ }
+
+ mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
+}
+
+/*
+ * Setup PCIE BARs and Address Decode Wins:
+ * BAR[0] -> internal registers (needed for MSI)
+ * BAR[1] -> covers all DRAM banks
+ * BAR[2] -> Disabled
+ * WIN[0-3] -> DRAM bank[0-3]
+ */
+static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+{
+ const struct mbus_dram_target_info *dram;
+ u32 size;
+ int i;
+
+ dram = mv_mbus_dram_info();
+
+ /* First, disable and clear BARs and windows. */
+ mvebu_pcie_disable_wins(port);
+
+ /* Setup windows for DDR banks. Count total DDR size on the fly. */
+ size = 0;
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ mvebu_writel(port, cs->base & 0xffff0000,
+ PCIE_WIN04_BASE_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
+ mvebu_writel(port,
+ ((cs->size - 1) & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ PCIE_WIN04_CTRL_OFF(i));
+
+ size += cs->size;
+ }
+
+ /* Round up 'size' to the nearest power of two. */
+ if ((size & (size - 1)) != 0)
+ size = 1 << fls(size);
+
+ /* Setup BAR[1] to all DRAM banks. */
+ mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
+ mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
+ PCIE_BAR_CTRL_OFF(1));
+
+ /*
+ * Point BAR[0] to the device's internal registers.
+ */
+ mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
+}
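+
+/*
+ * For illustration (bank layout assumed): two DRAM banks of 512 MB and
+ * 256 MB give size = 0x30000000, which is rounded up to 1 GB, so
+ * BAR[1]'s control register is programmed with
+ * ((0x40000000 - 1) & 0xffff0000) | 1 = 0x3fff0001.
+ */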
+
+static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+{
+ u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl;
+
+ /* Setup PCIe controller to Root Complex mode. */
+ ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
+ ctrl |= PCIE_CTRL_RC_MODE;
+ mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
+
+ /*
+ * Set Maximum Link Width to X1 or X4 in Root Port's PCIe Link
+	 * Capability register. The PCIe specification defines this register
+	 * as read-only, but on this mvebu controller it is read-write and
+	 * must be set to the number of SerDes PCIe lanes (1 or 4). If it is
+	 * not set correctly, the link with the endpoint card is not
+	 * established.
+ */
+ lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
+ mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+
+ /* Disable Root Bridge I/O space, memory space and bus mastering. */
+ cmd = mvebu_readl(port, PCIE_CMD_OFF);
+ cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ mvebu_writel(port, cmd, PCIE_CMD_OFF);
+
+ /*
+ * Change Class Code of PCI Bridge device to PCI Bridge (0x6004)
+ * because default value is Memory controller (0x5080).
+ *
+ * Note that this mvebu PCI Bridge does not have compliant Type 1
+ * Configuration Space. Header Type is reported as Type 0 and it
+ * has format of Type 0 config space.
+ *
+	 * Moreover, the Type 0 BAR registers (ranges 0x10 - 0x28 and
+	 * 0x30 - 0x34) have the same format in Marvell's specification as in
+	 * the PCIe specification, but their meaning is entirely different:
+	 * they are aliased into internal mvebu registers (e.g.
+	 * PCIE_BAR_LO_OFF) and must not be changed or reconfigured by PCI
+	 * device drivers.
+	 *
+	 * Therefore the driver uses a PCI Bridge emulation which redirects
+	 * configuration space accesses to internal mvebu registers or to an
+	 * emulated configuration buffer. The driver accesses this bridge
+	 * directly for simplicity, but the same registers can also be
+	 * reached via the standard mvebu way of accessing PCI config space.
+ */
+ dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
+ dev_rev &= ~0xffffff00;
+ dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);
+
+ /* Point PCIe unit MBUS decode windows to DRAM space. */
+ mvebu_pcie_setup_wins(port);
+
+ /*
+ * Program Root Port to automatically send Set_Slot_Power_Limit
+ * PCIe Message when changing status from Dl_Down to Dl_Up and valid
+ * slot power limit was specified.
+ */
+ sspl = mvebu_readl(port, PCIE_SSPL_OFF);
+ sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
+ if (port->slot_power_limit_value) {
+ sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT;
+ sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT;
+ sspl |= PCIE_SSPL_ENABLE;
+ }
+ mvebu_writel(port, sspl, PCIE_SSPL_OFF);
+
+ /* Mask all interrupt sources. */
+ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);
+
+ /* Clear all interrupt causes. */
+ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);
+
+ /* Check if "intx" interrupt was specified in DT. */
+ if (port->intx_irq > 0)
+ return;
+
+ /*
+ * Fallback code when "intx" interrupt was not specified in DT:
+	 * Unmask all legacy INTx interrupts, as the driver does not provide
+	 * a way to mask and unmask individual legacy INTx interrupts.
+	 * Legacy INTx are reported via one shared GIC source, so the kernel
+	 * cannot distinguish which individual legacy INTx was triggered.
+	 * Since these interrupts are shared anyway, this should not cause
+	 * any issue beyond a performance penalty: every PCIe interrupt
+	 * handler needs to be called whenever some interrupt triggers.
+ */
+ unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
+ unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) |
+ PCIE_INT_INTX(2) | PCIE_INT_INTX(3);
+ mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
+}
+
+static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
+ struct pci_bus *bus,
+ int devfn);
+
+static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct mvebu_pcie *pcie = bus->sysdata;
+ struct mvebu_pcie_port *port;
+ void __iomem *conf_data;
+
+ port = mvebu_pcie_find_port(pcie, bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (!mvebu_pcie_link_up(port))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ conf_data = port->base + PCIE_CONF_DATA_OFF;
+
+ mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+ PCIE_CONF_ADDR_OFF);
+
+ switch (size) {
+ case 1:
+ *val = readb_relaxed(conf_data + (where & 3));
+ break;
+ case 2:
+ *val = readw_relaxed(conf_data + (where & 2));
+ break;
+ case 4:
+ *val = readl_relaxed(conf_data);
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct mvebu_pcie *pcie = bus->sysdata;
+ struct mvebu_pcie_port *port;
+ void __iomem *conf_data;
+
+ port = mvebu_pcie_find_port(pcie, bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (!mvebu_pcie_link_up(port))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ conf_data = port->base + PCIE_CONF_DATA_OFF;
+
+ mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+ PCIE_CONF_ADDR_OFF);
+
+ switch (size) {
+ case 1:
+ writeb(val, conf_data + (where & 3));
+ break;
+ case 2:
+ writew(val, conf_data + (where & 2));
+ break;
+ case 4:
+ writel(val, conf_data);
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops mvebu_pcie_child_ops = {
+ .read = mvebu_pcie_child_rd_conf,
+ .write = mvebu_pcie_child_wr_conf,
+};
+
+/*
+ * Remove windows, starting from the largest ones to the smallest
+ * ones.
+ */
+static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
+ phys_addr_t base, size_t size)
+{
+ while (size) {
+ size_t sz = 1 << (fls(size) - 1);
+
+ mvebu_mbus_del_window(base, sz);
+ base += sz;
+ size -= sz;
+ }
+}
+
+/*
+ * MBus windows can only have a power of two size, but PCI BARs do not
+ * have this constraint. Therefore, we have to split the PCI BAR into
+ * areas each having a power of two size. We start from the largest
+ * one (i.e highest order bit set in the size).
+ */
+static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+ unsigned int target, unsigned int attribute,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap)
+{
+ size_t size_mapped = 0;
+
+ while (size) {
+ size_t sz = 1 << (fls(size) - 1);
+ int ret;
+
+ ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+ sz, remap);
+ if (ret) {
+ phys_addr_t end = base + sz - 1;
+
+ dev_err(&port->pcie->pdev->dev,
+ "Could not create MBus window at [mem %pa-%pa]: %d\n",
+ &base, &end, ret);
+ mvebu_pcie_del_windows(port, base - size_mapped,
+ size_mapped);
+ return ret;
+ }
+
+ size -= sz;
+ size_mapped += sz;
+ base += sz;
+ if (remap != MVEBU_MBUS_NO_REMAP)
+ remap += sz;
+ }
+
+ return 0;
+}
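+
+/*
+ * For illustration: a 5 MB request (size = 0x500000) is split into a
+ * 4 MB MBus window followed by a 1 MB one, since fls() always selects
+ * the largest remaining power-of-two chunk first.
+ */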
+
+static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
+ unsigned int target, unsigned int attribute,
+ const struct mvebu_pcie_window *desired,
+ struct mvebu_pcie_window *cur)
+{
+ int ret;
+
+ if (desired->base == cur->base && desired->remap == cur->remap &&
+ desired->size == cur->size)
+ return 0;
+
+ if (cur->size != 0) {
+ mvebu_pcie_del_windows(port, cur->base, cur->size);
+ cur->size = 0;
+ cur->base = 0;
+
+ /*
+ * If something tries to change the window while it is enabled
+ * the change will not be done atomically. That would be
+ * difficult to do in the general case.
+ */
+ }
+
+ if (desired->size == 0)
+ return 0;
+
+ ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
+ desired->size, desired->remap);
+ if (ret) {
+ cur->size = 0;
+ cur->base = 0;
+ return ret;
+ }
+
+ *cur = *desired;
+ return 0;
+}
+
+static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+{
+ struct mvebu_pcie_window desired = {};
+ struct pci_bridge_emul_conf *conf = &port->bridge.conf;
+
+ /* Are the new iobase/iolimit values invalid? */
+ if (conf->iolimit < conf->iobase ||
+ le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper))
+ return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
+ &desired, &port->iowin);
+
+ /*
+ * We read the PCI-to-PCI bridge emulated registers, and
+ * calculate the base address and size of the address decoding
+ * window to setup, according to the PCI-to-PCI bridge
+	 * specifications. iobase is the bus address, port->iowin.base
+	 * is the CPU address.
+ */
+ desired.remap = ((conf->iobase & 0xF0) << 8) |
+ (le16_to_cpu(conf->iobaseupper) << 16);
+ desired.base = port->pcie->io.start + desired.remap;
+ desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
+ (le16_to_cpu(conf->iolimitupper) << 16)) -
+ desired.remap) +
+ 1;
+
+ return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
+ &port->iowin);
+}
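+
+/*
+ * For illustration (register values assumed): iobase = 0x10 with
+ * iobaseupper = 0x0001 decodes to remap = 0x00011000; iolimit = 0xf0
+ * with iolimitupper = 0x0001 gives an end address of 0x0001ffff, so a
+ * 0xf000-byte window is programmed.
+ */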
+
+static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+{
+ struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
+ struct pci_bridge_emul_conf *conf = &port->bridge.conf;
+
+ /* Are the new membase/memlimit values invalid? */
+ if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase))
+ return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
+ &desired, &port->memwin);
+
+ /*
+ * We read the PCI-to-PCI bridge emulated registers, and
+ * calculate the base address and size of the address decoding
+ * window to setup, according to the PCI-to-PCI bridge
+ * specifications.
+ */
+ desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16);
+ desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) -
+ desired.base + 1;
+
+ return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
+ &port->memwin);
+}
+
+static pci_bridge_emul_read_status_t
+mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct mvebu_pcie_port *port = bridge->data;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ *value = mvebu_readl(port, PCIE_CMD_OFF);
+ break;
+
+ case PCI_PRIMARY_BUS: {
+ /*
+		 * Of the whole 32-bit register, only the secondary bus number
+		 * (the mvebu local bus number) is read from HW. The other
+		 * bits are retrieved only from the emulated config buffer.
+ */
+ __le32 *cfgspace = (__le32 *)&bridge->conf;
+ u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
+ val &= ~0xff00;
+ val |= mvebu_pcie_get_local_bus_nr(port) << 8;
+ *value = val;
+ break;
+ }
+
+ case PCI_INTERRUPT_LINE: {
+ /*
+		 * Of the whole 32-bit register, only one bit is read from HW:
+		 * PCI_BRIDGE_CTL_BUS_RESET. The other bits are retrieved only
+		 * from the emulated config buffer.
+ */
+ __le32 *cfgspace = (__le32 *)&bridge->conf;
+ u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+ if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
+ val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
+ else
+ val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
+ *value = val;
+ break;
+ }
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+
+ return PCI_BRIDGE_EMUL_HANDLED;
+}
+
+static pci_bridge_emul_read_status_t
+mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct mvebu_pcie_port *port = bridge->data;
+
+ switch (reg) {
+ case PCI_EXP_DEVCAP:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
+ break;
+
+ case PCI_EXP_DEVCTL:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
+ break;
+
+ case PCI_EXP_LNKCAP:
+ /*
+ * PCIe requires that the Clock Power Management capability bit
+ * is hard-wired to zero for downstream ports but HW returns 1.
+		 * Additionally, set the Data Link Layer Link Active Reporting
+		 * Capable bit, since a DL_Active indication is provided too.
+ */
+ *value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
+ ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC;
+ break;
+
+ case PCI_EXP_LNKCTL:
+ /* DL_Active indication is provided via PCIE_STAT_OFF */
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) |
+ (mvebu_pcie_link_up(port) ?
+ (PCI_EXP_LNKSTA_DLLLA << 16) : 0);
+ break;
+
+ case PCI_EXP_SLTCTL: {
+ u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl);
+ u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta);
+ u32 val = 0;
+ /*
+		 * When no slot power limit was specified in DT, the
+		 * ASPL_DISABLE bit is stored only in emulated config space.
+		 * Otherwise, reflect the status of the HW PCIE_SSPL_ENABLE bit.
+ */
+ if (!port->slot_power_limit_value)
+ val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE;
+ else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE))
+ val |= PCI_EXP_SLTCTL_ASPL_DISABLE;
+ /* This callback is 32-bit and in high bits is slot status. */
+ val |= slotsta << 16;
+ *value = val;
+ break;
+ }
+
+ case PCI_EXP_RTSTA:
+ *value = mvebu_readl(port, PCIE_RC_RTSTA);
+ break;
+
+ case PCI_EXP_DEVCAP2:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
+ break;
+
+ case PCI_EXP_DEVCTL2:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
+ break;
+
+ case PCI_EXP_LNKCTL2:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
+ break;
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+
+ return PCI_BRIDGE_EMUL_HANDLED;
+}
+
+static pci_bridge_emul_read_status_t
+mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct mvebu_pcie_port *port = bridge->data;
+
+ switch (reg) {
+ case 0:
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG+0:
+ case PCI_ERR_HEADER_LOG+4:
+ case PCI_ERR_HEADER_LOG+8:
+ case PCI_ERR_HEADER_LOG+12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_STATUS:
+ case PCI_ERR_ROOT_ERR_SRC:
+ *value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg);
+ break;
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+
+ return PCI_BRIDGE_EMUL_HANDLED;
+}
+
+static void
+mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct mvebu_pcie_port *port = bridge->data;
+ struct pci_bridge_emul_conf *conf = &bridge->conf;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ mvebu_writel(port, new, PCIE_CMD_OFF);
+ break;
+
+ case PCI_IO_BASE:
+ if ((mask & 0xffff) && mvebu_has_ioport(port) &&
+ mvebu_pcie_handle_iobase_change(port)) {
+ /* On error disable IO range */
+ conf->iobase &= ~0xf0;
+ conf->iolimit &= ~0xf0;
+ conf->iobase |= 0xf0;
+ conf->iobaseupper = cpu_to_le16(0x0000);
+ conf->iolimitupper = cpu_to_le16(0x0000);
+ }
+ break;
+
+ case PCI_MEMORY_BASE:
+ if (mvebu_pcie_handle_membase_change(port)) {
+ /* On error disable mem range */
+ conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
+ conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
+ conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
+ }
+ break;
+
+ case PCI_IO_BASE_UPPER16:
+ if (mvebu_has_ioport(port) &&
+ mvebu_pcie_handle_iobase_change(port)) {
+ /* On error disable IO range */
+ conf->iobase &= ~0xf0;
+ conf->iolimit &= ~0xf0;
+ conf->iobase |= 0xf0;
+ conf->iobaseupper = cpu_to_le16(0x0000);
+ conf->iolimitupper = cpu_to_le16(0x0000);
+ }
+ break;
+
+ case PCI_PRIMARY_BUS:
+ if (mask & 0xff00)
+ mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
+ break;
+
+ case PCI_INTERRUPT_LINE:
+ if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
+ u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
+ if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
+ ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
+ else
+ ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
+ mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct mvebu_pcie_port *port = bridge->data;
+
+ switch (reg) {
+ case PCI_EXP_DEVCTL:
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
+ break;
+
+ case PCI_EXP_LNKCTL:
+		/*
+		 * PCIe requires that the Enable Clock Power Management bit
+		 * is hard-wired to zero for downstream ports, but the HW
+		 * allows it to be changed.
+		 */
+ new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
+
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+ break;
+
+ case PCI_EXP_SLTCTL:
+		/*
+		 * Allow changing the PCIE_SSPL_ENABLE bit only when the slot
+		 * power limit was specified in DT and configured into HW.
+		 */
+ if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) &&
+ port->slot_power_limit_value) {
+ u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF);
+ if (new & PCI_EXP_SLTCTL_ASPL_DISABLE)
+ sspl &= ~PCIE_SSPL_ENABLE;
+ else
+ sspl |= PCIE_SSPL_ENABLE;
+ mvebu_writel(port, sspl, PCIE_SSPL_OFF);
+ }
+ break;
+
+ case PCI_EXP_RTSTA:
+ /*
+ * PME Status bit in Root Status Register (PCIE_RC_RTSTA)
+ * is read-only and can be cleared only by writing 0b to the
+ * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So
+ * clear PME via Interrupt Cause.
+ */
+ if (new & PCI_EXP_RTSTA_PME)
+ mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
+ break;
+
+ case PCI_EXP_DEVCTL2:
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
+ break;
+
+ case PCI_EXP_LNKCTL2:
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct mvebu_pcie_port *port = bridge->data;
+
+ switch (reg) {
+ /* These are W1C registers, so clear other bits */
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_ROOT_STATUS:
+ new &= mask;
+ fallthrough;
+
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG+0:
+ case PCI_ERR_HEADER_LOG+4:
+ case PCI_ERR_HEADER_LOG+8:
+ case PCI_ERR_HEADER_LOG+12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_ERR_SRC:
+ mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg);
+ break;
+
+ default:
+ break;
+ }
+}
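+
+/*
+ * Illustrative sketch (editorial addition, not part of this driver):
+ * masking "new" with the byte-enable mask, as done for the W1C
+ * (write-1-to-clear) status registers above, ensures that only bits the
+ * host explicitly wrote as 1 reach the hardware, so unrelated latched
+ * status bits are not cleared as a side effect:
+ *
+ *	static inline u32 w1c_value(u32 new, u32 mask)
+ *	{
+ *		return new & mask;	// only bytes the host actually wrote
+ *	}
+ *
+ * e.g. a 1-byte write of 0x01 at offset +1 yields mask 0x0000ff00, so a
+ * pending status bit in byte 0 survives the access.
+ */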
+
+static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+ .read_base = mvebu_pci_bridge_emul_base_conf_read,
+ .write_base = mvebu_pci_bridge_emul_base_conf_write,
+ .read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
+ .write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
+ .read_ext = mvebu_pci_bridge_emul_ext_conf_read,
+ .write_ext = mvebu_pci_bridge_emul_ext_conf_write,
+};
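+
+/*
+ * Rough sketch of how the pci-bridge-emul core is expected to dispatch to
+ * the ops above (an assumption based on drivers/pci/pci-bridge-emul.c,
+ * not code from this driver): a config read first asks the callback that
+ * matches the register's range, then falls back to the emulated config
+ * space when the callback does not handle the register:
+ *
+ *	if (reg < PCI_BRIDGE_CONF_END)
+ *		ret = ops->read_base(bridge, reg, &val);
+ *	else if (reg is inside the PCIe capability)
+ *		ret = ops->read_pcie(bridge, reg - bridge->pcie_start, &val);
+ *	else if (reg >= PCI_CFG_SPACE_SIZE)
+ *		ret = ops->read_ext(bridge, reg - PCI_CFG_SPACE_SIZE, &val);
+ *
+ *	if (ret == PCI_BRIDGE_EMUL_NOT_HANDLED)
+ *		val = le32_to_cpu(cfgspace[reg / 4]);
+ */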
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
+{
+ unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD;
+ struct pci_bridge_emul *bridge = &port->bridge;
+ u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF);
+ u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
+ u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF);
+ u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
+ u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);
+
+ bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff);
+ bridge->conf.device = cpu_to_le16(dev_id >> 16);
+ bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff);
+
+ if (mvebu_has_ioport(port)) {
+ /* We support 32 bits I/O addressing */
+ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+ bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
+ } else {
+ bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD;
+ }
+
+	/*
+	 * Older mvebu hardware provides the PCIe Capability structure only
+	 * in version 1; newer hardware provides it in version 2.
+	 * Enable slot support, which is emulated.
+	 */
+ bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT);
+
+	/*
+	 * Set the Presence Detect State bit permanently, as there is no
+	 * support for unplugging the PCIe card from the slot. Assume that
+	 * a PCIe card is always connected to the slot.
+	 *
+	 * Set the physical slot number to port+1, as mvebu ports are
+	 * indexed from zero and the value zero is reserved for ports
+	 * integrated into the same silicon as the Root Port, which is not
+	 * the mvebu case.
+	 *
+	 * Also set the correct slot power limit.
+	 */
+ bridge->pcie_conf.slotcap = cpu_to_le32(
+ FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) |
+ FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) |
+ FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1));
+ bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
+
+ bridge->subsystem_vendor_id = ssdev_id & 0xffff;
+ bridge->subsystem_id = ssdev_id >> 16;
+ bridge->has_pcie = true;
+ bridge->pcie_start = PCIE_CAP_PCIEXP;
+ bridge->data = port;
+ bridge->ops = &mvebu_pci_bridge_emul_ops;
+
+ return pci_bridge_emul_init(bridge, bridge_flags);
+}
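+
+/*
+ * Illustrative example (hypothetical values, editorial addition): with a
+ * 25 W slot limit, DT parsing yields value 250 and scale code 1 (0.1x
+ * steps, per the PCIe SLTCAP encoding), so for port 0 the Slot
+ * Capabilities word above would be assembled as:
+ *
+ *	u32 slotcap = FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250) |
+ *		      FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1) |
+ *		      FIELD_PREP(PCI_EXP_SLTCAP_PSN, 0 + 1);
+ *
+ * FIELD_PREP() shifts each value into the position described by its
+ * mask, avoiding hand-written shift constants.
+ */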
+
+static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
+ struct pci_bus *bus,
+ int devfn)
+{
+ int i;
+
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+
+ if (!port->base)
+ continue;
+
+ if (bus->number == 0 && port->devfn == devfn)
+ return port;
+ if (bus->number != 0 &&
+ bus->number >= port->bridge.conf.secondary_bus &&
+ bus->number <= port->bridge.conf.subordinate_bus)
+ return port;
+ }
+
+ return NULL;
+}
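+
+/*
+ * Illustrative note (editorial addition): config accesses on the
+ * "virtual" bus 0 are matched by BDF (one Root Port per devfn), while
+ * accesses on any other bus are matched against the [secondary_bus,
+ * subordinate_bus] window the OS programmed into the emulated bridge;
+ * e.g. with secondary_bus = 1 and subordinate_bus = 3, an access to
+ * bus 2 is routed to that port.
+ */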
+
+/* PCI configuration space write function */
+static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct mvebu_pcie *pcie = bus->sysdata;
+ struct mvebu_pcie_port *port;
+
+ port = mvebu_pcie_find_port(pcie, bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return pci_bridge_emul_conf_write(&port->bridge, where, size, val);
+}
+
+/* PCI configuration space read function */
+static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct mvebu_pcie *pcie = bus->sysdata;
+ struct mvebu_pcie_port *port;
+
+ port = mvebu_pcie_find_port(pcie, bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return pci_bridge_emul_conf_read(&port->bridge, where, size, val);
+}
+
+static struct pci_ops mvebu_pcie_ops = {
+ .read = mvebu_pcie_rd_conf,
+ .write = mvebu_pcie_wr_conf,
+};
+
+static void mvebu_pcie_intx_irq_mask(struct irq_data *d)
+{
+ struct mvebu_pcie_port *port = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 unmask;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
+ unmask &= ~PCIE_INT_INTX(hwirq);
+ mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+static void mvebu_pcie_intx_irq_unmask(struct irq_data *d)
+{
+ struct mvebu_pcie_port *port = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 unmask;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
+ unmask |= PCIE_INT_INTX(hwirq);
+ mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+static struct irq_chip intx_irq_chip = {
+ .name = "mvebu-INTx",
+ .irq_mask = mvebu_pcie_intx_irq_mask,
+ .irq_unmask = mvebu_pcie_intx_irq_unmask,
+};
+
+static int mvebu_pcie_intx_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hwirq)
+{
+ struct mvebu_pcie_port *port = h->host_data;
+
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(virq, port);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = {
+ .map = mvebu_pcie_intx_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
+{
+ struct device *dev = &port->pcie->pdev->dev;
+ struct device_node *pcie_intc_node;
+
+ raw_spin_lock_init(&port->irq_lock);
+
+ pcie_intc_node = of_get_next_child(port->dn, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found for %s\n", port->name);
+ return -ENODEV;
+ }
+
+ port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &mvebu_pcie_intx_irq_domain_ops,
+ port);
+ of_node_put(pcie_intc_node);
+ if (!port->intx_irq_domain) {
+ dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mvebu_pcie_irq_handler(struct irq_desc *desc)
+{
+ struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = &port->pcie->pdev->dev;
+ u32 cause, unmask, status;
+ int i;
+
+ chained_irq_enter(chip, desc);
+
+ cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF);
+ unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
+ status = cause & unmask;
+
+ /* Process legacy INTx interrupts */
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ if (!(status & PCIE_INT_INTX(i)))
+ continue;
+
+ if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL)
+ dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A');
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ /* Interrupt support on mvebu emulated bridges is not implemented yet */
+ if (dev->bus->number == 0)
+ return 0; /* Proper return code 0 == NO_IRQ */
+
+ return of_irq_parse_and_map_pci(dev, slot, pin);
+}
+
+static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align)
+{
+ if (dev->bus->number != 0)
+ return start;
+
+ /*
+ * On the PCI-to-PCI bridge side, the I/O windows must have at
+ * least a 64 KB size and the memory windows must have at
+ * least a 1 MB size. Moreover, MBus windows need to have a
+ * base address aligned on their size, and their size must be
+ * a power of two. This means that if the BAR doesn't have a
+ * power of two size, several MBus windows will actually be
+ * created. We need to ensure that the biggest MBus window
+ * (which will be the first one) is aligned on its size, which
+ * explains the rounddown_pow_of_two() being done here.
+ */
+ if (res->flags & IORESOURCE_IO)
+ return round_up(start, max_t(resource_size_t, SZ_64K,
+ rounddown_pow_of_two(size)));
+ else if (res->flags & IORESOURCE_MEM)
+ return round_up(start, max_t(resource_size_t, SZ_1M,
+ rounddown_pow_of_two(size)));
+ else
+ return start;
+}
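+
+/*
+ * Worked example (illustrative values, editorial addition): a 1.5 MiB
+ * (0x180000) memory BAR gives rounddown_pow_of_two(0x180000) == SZ_1M,
+ * so its start is rounded up to a 1 MiB boundary:
+ *
+ *	round_up(start, max_t(resource_size_t, SZ_1M,
+ *			      rounddown_pow_of_two(0x180000)));
+ *
+ * MBus then covers the BAR with a 1 MiB window plus a 512 KiB window,
+ * and the first (biggest) window stays aligned to its own size.
+ */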
+
+static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
+ struct device_node *np,
+ struct mvebu_pcie_port *port)
+{
+ int ret = 0;
+
+ ret = of_address_to_resource(np, 0, &port->regs);
+ if (ret)
+ return (void __iomem *)ERR_PTR(ret);
+
+ return devm_ioremap_resource(&pdev->dev, &port->regs);
+}
+
+#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
+#define DT_TYPE_IO 0x1
+#define DT_TYPE_MEM32 0x2
+#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
+#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
+
+static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ unsigned long type,
+ unsigned int *tgt,
+ unsigned int *attr)
+{
+ const int na = 3, ns = 2;
+ const __be32 *range;
+ int rlen, nranges, rangesz, pna, i;
+
+ *tgt = -1;
+ *attr = -1;
+
+ range = of_get_property(np, "ranges", &rlen);
+ if (!range)
+ return -EINVAL;
+
+ pna = of_n_addr_cells(np);
+ rangesz = pna + na + ns;
+ nranges = rlen / sizeof(__be32) / rangesz;
+
+ for (i = 0; i < nranges; i++, range += rangesz) {
+ u32 flags = of_read_number(range, 1);
+ u32 slot = of_read_number(range + 1, 1);
+ u64 cpuaddr = of_read_number(range + na, pna);
+ unsigned long rtype;
+
+ if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
+ rtype = IORESOURCE_IO;
+ else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+ rtype = IORESOURCE_MEM;
+ else
+ continue;
+
+ if (slot == PCI_SLOT(devfn) && type == rtype) {
+ *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
+ *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
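+
+/*
+ * Illustrative example (hypothetical ranges entry, editorial addition):
+ * with a parent CPU address of 0x04e8000000000000 the MBus
+ * target/attribute pair decodes as:
+ *
+ *	u64 cpuaddr = 0x04e8000000000000ULL;
+ *	unsigned int tgt  = DT_CPUADDR_TO_TARGET(cpuaddr);	// 0x04
+ *	unsigned int attr = DT_CPUADDR_TO_ATTR(cpuaddr);	// 0xe8
+ */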
+
+static int mvebu_pcie_suspend(struct device *dev)
+{
+ struct mvebu_pcie *pcie;
+ int i;
+
+ pcie = dev_get_drvdata(dev);
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = pcie->ports + i;
+ if (!port->base)
+ continue;
+ port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
+ }
+
+ return 0;
+}
+
+static int mvebu_pcie_resume(struct device *dev)
+{
+ struct mvebu_pcie *pcie;
+ int i;
+
+ pcie = dev_get_drvdata(dev);
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = pcie->ports + i;
+ if (!port->base)
+ continue;
+ mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
+ mvebu_pcie_setup_hw(port);
+ }
+
+ return 0;
+}
+
+static void mvebu_pcie_port_clk_put(void *data)
+{
+ struct mvebu_pcie_port *port = data;
+
+ clk_put(port->clk);
+}
+
+static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
+ struct mvebu_pcie_port *port, struct device_node *child)
+{
+ struct device *dev = &pcie->pdev->dev;
+ u32 slot_power_limit;
+ int ret;
+ u32 num_lanes;
+
+ port->pcie = pcie;
+
+ if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
+ dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
+ child);
+ goto skip;
+ }
+
+ if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
+ port->lane = 0;
+
+ if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4)
+ port->is_x4 = true;
+
+ port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
+ port->lane);
+ if (!port->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ port->devfn = of_pci_get_devfn(child);
+ if (port->devfn < 0)
+ goto skip;
+ if (PCI_FUNC(port->devfn) != 0) {
+ dev_err(dev, "%s: invalid function number, must be zero\n",
+ port->name);
+ goto skip;
+ }
+
+ ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
+ &port->mem_target, &port->mem_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
+ port->name);
+ goto skip;
+ }
+
+ if (resource_size(&pcie->io) != 0) {
+ mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
+ &port->io_target, &port->io_attr);
+ } else {
+ port->io_target = -1;
+ port->io_attr = -1;
+ }
+
+	/*
+	 * Old DT bindings do not contain the "intx" interrupt,
+	 * so do not fail driver probing when the interrupt does not exist.
+	 */
+ port->intx_irq = of_irq_get_byname(child, "intx");
+ if (port->intx_irq == -EPROBE_DEFER) {
+ ret = port->intx_irq;
+ goto err;
+ }
+ if (port->intx_irq <= 0) {
+ dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, "
+ "%pOF does not contain intx interrupt\n",
+ port->name, child);
+ }
+
+ port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
+ port->name);
+ if (!port->reset_name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ port->reset_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(child),
+ "reset", GPIOD_OUT_HIGH,
+ port->name);
+ ret = PTR_ERR_OR_ZERO(port->reset_gpio);
+ if (ret) {
+ if (ret != -ENOENT)
+ goto err;
+ /* reset gpio is optional */
+ port->reset_gpio = NULL;
+ devm_kfree(dev, port->reset_name);
+ port->reset_name = NULL;
+ }
+
+ slot_power_limit = of_pci_get_slot_power_limit(child,
+ &port->slot_power_limit_value,
+ &port->slot_power_limit_scale);
+ if (slot_power_limit)
+ dev_info(dev, "%s: Slot power limit %u.%uW\n",
+ port->name,
+ slot_power_limit / 1000,
+ (slot_power_limit / 100) % 10);
+
+ port->clk = of_clk_get_by_name(child, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(dev, "%s: cannot get clock\n", port->name);
+ goto skip;
+ }
+
+ ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
+ if (ret < 0) {
+ clk_put(port->clk);
+ goto err;
+ }
+
+ return 1;
+
+skip:
+ ret = 0;
+
+ /* In the case of skipping, we need to free these */
+ devm_kfree(dev, port->reset_name);
+ port->reset_name = NULL;
+ devm_kfree(dev, port->name);
+ port->name = NULL;
+
+err:
+ return ret;
+}
+
+/*
+ * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
+ * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
+ * of the PCI Express Card Electromechanical Specification, 1.1.
+ */
+static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
+{
+ int ret;
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret < 0)
+ return ret;
+
+ if (port->reset_gpio) {
+ u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;
+
+ of_property_read_u32(port->dn, "reset-delay-us",
+ &reset_udelay);
+
+ udelay(100);
+
+ gpiod_set_value_cansleep(port->reset_gpio, 0);
+ msleep(reset_udelay / 1000);
+ }
+
+ return 0;
+}
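+
+/*
+ * Timing recap of the sequence above (editorial addition): clock enabled
+ * -> udelay(100) guarantees >= 100 us of stable refclk -> PERST#
+ * deasserted via GPIO -> msleep() for "reset-delay-us" (default
+ * PCI_PM_D3COLD_WAIT * 1000 us, i.e. 100 ms) before the link is used.
+ */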
+
+/*
+ * Power down a PCIe port. Strictly, PCIe requires us to place the card
+ * in D3hot state before asserting PERST#.
+ */
+static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
+{
+ gpiod_set_value_cansleep(port->reset_gpio, 1);
+
+ clk_disable_unprepare(port->clk);
+}
+
+/*
+ * devm_of_pci_get_host_bridge_resources() only sets up translatable
+ * resources, so we need extra resource setup, parsing our special DT
+ * properties that encode the MEM and IO apertures.
+ */
+static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ int ret;
+
+ /* Get the PCIe memory aperture */
+ mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
+ if (resource_size(&pcie->mem) == 0) {
+ dev_err(dev, "invalid memory aperture size\n");
+ return -EINVAL;
+ }
+
+ pcie->mem.name = "PCI MEM";
+ pci_add_resource(&bridge->windows, &pcie->mem);
+ ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
+ if (ret)
+ return ret;
+
+ /* Get the PCIe IO aperture */
+ mvebu_mbus_get_pcie_io_aperture(&pcie->io);
+
+ if (resource_size(&pcie->io) != 0) {
+ pcie->realio.flags = pcie->io.flags;
+ pcie->realio.start = PCIBIOS_MIN_IO;
+ pcie->realio.end = min_t(resource_size_t,
+ IO_SPACE_LIMIT - SZ_64K,
+ resource_size(&pcie->io) - 1);
+ pcie->realio.name = "PCI I/O";
+
+ ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
+ if (ret)
+ return ret;
+
+ pci_add_resource(&bridge->windows, &pcie->realio);
+ ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvebu_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mvebu_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int num, i, ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
+
+ ret = mvebu_pcie_parse_request_resources(pcie);
+ if (ret)
+ return ret;
+
+ num = of_get_available_child_count(np);
+
+ pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
+ if (!pcie->ports)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_available_child_of_node(np, child) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+
+ ret = mvebu_pcie_parse_port(pcie, port, child);
+ if (ret < 0) {
+ of_node_put(child);
+ return ret;
+ } else if (ret == 0) {
+ continue;
+ }
+
+ port->dn = child;
+ i++;
+ }
+ pcie->nports = i;
+
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+ int irq = port->intx_irq;
+
+ child = port->dn;
+ if (!child)
+ continue;
+
+ ret = mvebu_pcie_powerup(port);
+ if (ret < 0)
+ continue;
+
+ port->base = mvebu_pcie_map_registers(pdev, child, port);
+ if (IS_ERR(port->base)) {
+ dev_err(dev, "%s: cannot map registers\n", port->name);
+ port->base = NULL;
+ mvebu_pcie_powerdown(port);
+ continue;
+ }
+
+ ret = mvebu_pci_bridge_emul_init(port);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot init emulated bridge\n",
+ port->name);
+ devm_iounmap(dev, port->base);
+ port->base = NULL;
+ mvebu_pcie_powerdown(port);
+ continue;
+ }
+
+ if (irq > 0) {
+ ret = mvebu_pcie_init_irq_domain(port);
+ if (ret) {
+ dev_err(dev, "%s: cannot init irq domain\n",
+ port->name);
+ pci_bridge_emul_cleanup(&port->bridge);
+ devm_iounmap(dev, port->base);
+ port->base = NULL;
+ mvebu_pcie_powerdown(port);
+ continue;
+ }
+ irq_set_chained_handler_and_data(irq,
+ mvebu_pcie_irq_handler,
+ port);
+ }
+
+		/*
+		 * The PCIe topology exported by mvebu hw is quite
+		 * complicated. In reality it has something like N fully
+		 * independent host bridges where each host bridge has one
+		 * PCIe Root Port (which acts as a PCI Bridge device). Each
+		 * host bridge has its own independent internal registers,
+		 * independent access to PCI config space, independent
+		 * interrupt lines, independent window and memory access
+		 * configuration. But additionally there is some kind of
+		 * peer-to-peer support between PCIe devices behind different
+		 * host bridges, limited just to forwarding of memory and I/O
+		 * transactions (forwarding of error messages and config
+		 * cycles is not supported). So we could say there are N
+		 * independent PCIe Root Complexes.
+		 *
+		 * For this kind of setup DT should have been structured into
+		 * N independent PCIe controllers / host bridges. But instead
+		 * the structure was defined in the past to put the PCIe Root
+		 * Ports of all host bridges onto one bus zero, like in a
+		 * classic multi-port Root Complex setup with just one host
+		 * bridge.
+		 *
+		 * This means that the pci-mvebu.c driver provides a "virtual"
+		 * bus 0 on which it registers all PCIe Root Ports (PCI Bridge
+		 * devices) specified in DT by their BDF addresses, and
+		 * virtually routes PCI config accesses of each PCI Bridge
+		 * device to the specific PCIe host bridge.
+		 *
+		 * Normally a PCI Bridge should choose between Type 0 and
+		 * Type 1 config requests based on the primary and secondary
+		 * bus numbers configured on the bridge itself. But because
+		 * the mvebu PCI Bridge does not have registers for primary
+		 * and secondary bus numbers in its config space, it
+		 * determines the type of config requests in its own custom
+		 * way.
+		 *
+		 * There are two ways mvebu determines the type of config
+		 * request.
+		 *
+		 * 1. If the Secondary Bus Number Enable bit is not set or is
+		 * not available (applies to pre-XP PCIe controllers) then
+		 * Type 0 is used if the target bus number equals the Local
+		 * Bus Number (bits [15:8] in register 0x1a04) and the target
+		 * device number differs from the Local Device Number (bits
+		 * [20:16] in register 0x1a04). Type 1 is used if the target
+		 * bus number differs from the Local Bus Number. And when the
+		 * target bus number equals the Local Bus Number and the
+		 * target device equals the Local Device Number then the
+		 * request is routed to the Local PCI Bridge (PCIe Root Port).
+		 *
+		 * 2. If the Secondary Bus Number Enable bit is set (bit 7 in
+		 * register 0x1a2c) then mvebu hw determines the type of
+		 * config request like a compliant PCI Bridge, based on the
+		 * primary bus number, which is configured via the Local Bus
+		 * Number (bits [15:8] in register 0x1a04), and the secondary
+		 * bus number, which is configured via the Secondary Bus
+		 * Number (bits [7:0] in register 0x1a2c). The Local PCI
+		 * Bridge (PCIe Root Port) is available on the primary bus as
+		 * the device with the Local Device Number (bits [20:16] in
+		 * register 0x1a04).
+		 *
+		 * The Secondary Bus Number Enable bit is disabled by default
+		 * and option 2. is not available on pre-XP PCIe controllers.
+		 * Hence this driver always uses option 1.
+		 *
+		 * Basically it means that the primary and secondary buses
+		 * share one virtual number configured via the Local Bus
+		 * Number bits, and the Local Device Number bits determine
+		 * whether the primary or the secondary bus is being accessed.
+		 * Set the Local Device Number to 1 and redirect all writes of
+		 * the PCI Bridge Secondary Bus Number register to the Local
+		 * Bus Number (bits [15:8] in register 0x1a04).
+		 *
+		 * This way, accesses to devices on buses behind the secondary
+		 * bus number work correctly, and config space accesses to
+		 * device 0 at the secondary bus number are correctly routed
+		 * to the secondary bus. Due to issues described in
+		 * mvebu_pcie_setup_hw(), PCI Bridges on the primary bus
+		 * (zero) are not accessed directly via PCI config space but
+		 * rather indirectly via the kernel's emulated PCI bridge
+		 * driver.
+		 */
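+		/*
+		 * Illustrative recap of option 1. above (pseudocode,
+		 * editorial addition, not driver code), with the local
+		 * bus/device numbers as programmed just below:
+		 *
+		 *	if (bus == local_bus && dev == local_dev)
+		 *		-> access the Root Port (emulated bridge)
+		 *	else if (bus == local_bus)
+		 *		-> Type 0 config request on the link
+		 *	else
+		 *		-> Type 1 config request, forwarded downstream
+		 */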
+ mvebu_pcie_setup_hw(port);
+ mvebu_pcie_set_local_dev_nr(port, 1);
+ mvebu_pcie_set_local_bus_nr(port, 0);
+ }
+
+ bridge->sysdata = pcie;
+ bridge->ops = &mvebu_pcie_ops;
+ bridge->child_ops = &mvebu_pcie_child_ops;
+ bridge->align_resource = mvebu_pcie_align_resource;
+ bridge->map_irq = mvebu_pcie_map_irq;
+
+ return pci_host_probe(bridge);
+}
+
+static void mvebu_pcie_remove(struct platform_device *pdev)
+{
+ struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u32 cmd, sspl;
+ int i;
+
+ /* Remove PCI bus with all devices. */
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ pci_unlock_rescan_remove();
+
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+ int irq = port->intx_irq;
+
+ if (!port->base)
+ continue;
+
+ /* Disable Root Bridge I/O space, memory space and bus mastering. */
+ cmd = mvebu_readl(port, PCIE_CMD_OFF);
+ cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ mvebu_writel(port, cmd, PCIE_CMD_OFF);
+
+ /* Mask all interrupt sources. */
+ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);
+
+ /* Clear all interrupt causes. */
+ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);
+
+ if (irq > 0)
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+
+ /* Remove IRQ domains. */
+ if (port->intx_irq_domain)
+ irq_domain_remove(port->intx_irq_domain);
+
+ /* Free config space for emulated root bridge. */
+ pci_bridge_emul_cleanup(&port->bridge);
+
+ /* Disable sending Set_Slot_Power_Limit PCIe Message. */
+ sspl = mvebu_readl(port, PCIE_SSPL_OFF);
+ sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
+ mvebu_writel(port, sspl, PCIE_SSPL_OFF);
+
+ /* Disable and clear BARs and windows. */
+ mvebu_pcie_disable_wins(port);
+
+ /* Delete PCIe IO and MEM windows. */
+ if (port->iowin.size)
+ mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
+ if (port->memwin.size)
+ mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);
+
+ /* Power down card and disable clocks. Must be the last step. */
+ mvebu_pcie_powerdown(port);
+ }
+}
+
+static const struct of_device_id mvebu_pcie_of_match_table[] = {
+ { .compatible = "marvell,armada-xp-pcie", },
+ { .compatible = "marvell,armada-370-pcie", },
+ { .compatible = "marvell,dove-pcie", },
+ { .compatible = "marvell,kirkwood-pcie", },
+ {},
+};
+
+static const struct dev_pm_ops mvebu_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
+};
+
+static struct platform_driver mvebu_pcie_driver = {
+ .driver = {
+ .name = "mvebu-pcie",
+ .of_match_table = mvebu_pcie_of_match_table,
+ .pm = &mvebu_pcie_pm_ops,
+ },
+ .probe = mvebu_pcie_probe,
+ .remove_new = mvebu_pcie_remove,
+};
+module_platform_driver(mvebu_pcie_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
+MODULE_DESCRIPTION("Marvell EBU PCIe controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c
new file mode 100644
index 000000000..d29866485
--- /dev/null
+++ b/drivers/pci/controller/pci-rcar-gen2.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pci-rcar-gen2: internal PCI bus support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * Author: Valentine Barshak <valentine.barshak@cogentembedded.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include "../pci.h"
+
+/* AHB-PCI Bridge PCI communication registers */
+#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
+
+#define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00)
+#define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04)
+#define RCAR_PCIAHB_PREFETCH0 0x0
+#define RCAR_PCIAHB_PREFETCH4 0x1
+#define RCAR_PCIAHB_PREFETCH8 0x2
+#define RCAR_PCIAHB_PREFETCH16 0x3
+
+#define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10)
+#define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14)
+#define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1)
+#define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1)
+#define RCAR_AHBPCI_WIN1_HOST (1 << 30)
+#define RCAR_AHBPCI_WIN1_DEVICE (1 << 31)
+
+#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20)
+#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24)
+#define RCAR_PCI_INT_SIGTABORT (1 << 0)
+#define RCAR_PCI_INT_SIGRETABORT (1 << 1)
+#define RCAR_PCI_INT_REMABORT (1 << 2)
+#define RCAR_PCI_INT_PERR (1 << 3)
+#define RCAR_PCI_INT_SIGSERR (1 << 4)
+#define RCAR_PCI_INT_RESERR (1 << 5)
+#define RCAR_PCI_INT_WIN1ERR (1 << 12)
+#define RCAR_PCI_INT_WIN2ERR (1 << 13)
+#define RCAR_PCI_INT_A (1 << 16)
+#define RCAR_PCI_INT_B (1 << 17)
+#define RCAR_PCI_INT_PME (1 << 19)
+#define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT | \
+ RCAR_PCI_INT_SIGRETABORT | \
+ RCAR_PCI_INT_REMABORT | \
+ RCAR_PCI_INT_PERR | \
+ RCAR_PCI_INT_SIGSERR | \
+ RCAR_PCI_INT_RESERR | \
+ RCAR_PCI_INT_WIN1ERR | \
+ RCAR_PCI_INT_WIN2ERR)
+
+#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30)
+#define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0)
+#define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1)
+#define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2)
+#define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7)
+#define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17)
+#define RCAR_AHB_BUS_MODE (RCAR_AHB_BUS_MMODE_HTRANS | \
+ RCAR_AHB_BUS_MMODE_BYTE_BURST | \
+ RCAR_AHB_BUS_MMODE_WR_INCR | \
+ RCAR_AHB_BUS_MMODE_HBUS_REQ | \
+ RCAR_AHB_BUS_SMODE_READYCTR)
+
+#define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34)
+#define RCAR_USBCTR_USBH_RST (1 << 0)
+#define RCAR_USBCTR_PCICLK_MASK (1 << 1)
+#define RCAR_USBCTR_PLL_RST (1 << 2)
+#define RCAR_USBCTR_DIRPD (1 << 8)
+#define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9)
+#define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10)
+
+#define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40)
+#define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0)
+#define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1)
+#define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12)
+
+#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
+
+struct rcar_pci {
+ struct device *dev;
+ void __iomem *reg;
+ struct resource mem_res;
+ struct resource *cfg_res;
+ int irq;
+};
+
+/* PCI configuration space operations */
+static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct rcar_pci *priv = bus->sysdata;
+ int slot, val;
+
+ if (!pci_is_root_bus(bus) || PCI_FUNC(devfn))
+ return NULL;
+
+ /* Only one EHCI/OHCI device built-in */
+ slot = PCI_SLOT(devfn);
+ if (slot > 2)
+ return NULL;
+
+ /* bridge logic only has registers to 0x40 */
+ if (slot == 0x0 && where >= 0x40)
+ return NULL;
+
+ val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG :
+ RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG;
+
+ iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG);
+ return priv->reg + (slot >> 1) * 0x100 + where;
+}
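+
+/*
+ * Illustrative recap (editorial addition, not extra driver code): slot 0
+ * selects the AHB-PCI bridge itself (WIN1_HOST) and slots 1 and 2 the
+ * built-in USB functions (WIN1_DEVICE); the two devices sit 0x100 bytes
+ * apart in one window:
+ *
+ *	slot 0: WIN1_HOST   -> priv->reg + where (bridge registers)
+ *	slot 1: WIN1_DEVICE -> priv->reg + (1 >> 1) * 0x100 + where == reg + 0x000
+ *	slot 2: WIN1_DEVICE -> priv->reg + (2 >> 1) * 0x100 + where == reg + 0x100
+ */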
+
+#ifdef CONFIG_PCI_DEBUG
+/* if debug enabled, then attach an error handler irq to the bridge */
+
+static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
+{
+ struct rcar_pci *priv = pw;
+ struct device *dev = priv->dev;
+ u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
+
+ if (status & RCAR_PCI_INT_ALLERRORS) {
+ dev_err(dev, "error irq: status %08x\n", status);
+
+ /* clear the error(s) */
+ iowrite32(status & RCAR_PCI_INT_ALLERRORS,
+ priv->reg + RCAR_PCI_INT_STATUS_REG);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void rcar_pci_setup_errirq(struct rcar_pci *priv)
+{
+ struct device *dev = priv->dev;
+ int ret;
+ u32 val;
+
+ ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq,
+ IRQF_SHARED, "error irq", priv);
+ if (ret) {
+ dev_err(dev, "cannot claim IRQ for error handling\n");
+ return;
+ }
+
+ val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG);
+ val |= RCAR_PCI_INT_ALLERRORS;
+ iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG);
+}
+#else
+static inline void rcar_pci_setup_errirq(struct rcar_pci *priv) { }
+#endif
+
+/* PCI host controller setup */
+static void rcar_pci_setup(struct rcar_pci *priv)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(priv);
+ struct device *dev = priv->dev;
+ void __iomem *reg = priv->reg;
+ struct resource_entry *entry;
+ unsigned long window_size;
+ unsigned long window_addr;
+ unsigned long window_pci;
+ u32 val;
+
+ entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM);
+ if (!entry) {
+ window_addr = 0x40000000;
+ window_pci = 0x40000000;
+ window_size = SZ_1G;
+ } else {
+ window_addr = entry->res->start;
+ window_pci = entry->res->start - entry->offset;
+ window_size = resource_size(entry->res);
+ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
+ dev_info(dev, "PCI: revision %x\n", val);
+
+ /* Disable Direct Power Down State and assert reset */
+ val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
+ val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST;
+ iowrite32(val, reg + RCAR_USBCTR_REG);
+ udelay(4);
+
+ /* De-assert reset and reset PCIAHB window1 size */
+ val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK |
+ RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST);
+
+ /* Setup PCIAHB window1 size */
+ switch (window_size) {
+ case SZ_2G:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_2G;
+ break;
+ case SZ_1G:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_1G;
+ break;
+ case SZ_512M:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_512M;
+ break;
+ default:
+ pr_warn("unknown window size %ld - defaulting to 256M\n",
+ window_size);
+ window_size = SZ_256M;
+ fallthrough;
+ case SZ_256M:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_256M;
+ break;
+ }
+ iowrite32(val, reg + RCAR_USBCTR_REG);
+
+ /* Configure AHB master and slave modes */
+ iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG);
+
+ /* Configure PCI arbiter */
+ val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG);
+ val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 |
+ RCAR_PCI_ARBITER_PCIBP_MODE;
+ iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);
+
+ /* PCI-AHB mapping */
+ iowrite32(window_addr | RCAR_PCIAHB_PREFETCH16,
+ reg + RCAR_PCIAHB_WIN1_CTR_REG);
+
+ /* AHB-PCI mapping: OHCI/EHCI registers */
+ val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM;
+ iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG);
+
+ /* Enable AHB-PCI bridge PCI configuration access */
+ iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG,
+ reg + RCAR_AHBPCI_WIN1_CTR_REG);
+ /* Set PCI-AHB Window1 address */
+ iowrite32(window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH,
+ reg + PCI_BASE_ADDRESS_1);
+ /* Set AHB-PCI bridge PCI communication area address */
+ val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET;
+ iowrite32(val, reg + PCI_BASE_ADDRESS_0);
+
+ val = ioread32(reg + PCI_COMMAND);
+ val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ iowrite32(val, reg + PCI_COMMAND);
+
+ /* Enable PCI interrupts */
+ iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME,
+ reg + RCAR_PCI_INT_ENABLE_REG);
+
+ rcar_pci_setup_errirq(priv);
+}
+
+static struct pci_ops rcar_pci_ops = {
+ .map_bus = rcar_pci_cfg_base,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int rcar_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *cfg_res, *mem_res;
+ struct rcar_pci *priv;
+ struct pci_host_bridge *bridge;
+ void __iomem *reg;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*priv));
+ if (!bridge)
+ return -ENOMEM;
+
+ priv = pci_host_bridge_priv(bridge);
+ bridge->sysdata = priv;
+
+ reg = devm_platform_get_and_ioremap_resource(pdev, 0, &cfg_res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem_res || !mem_res->start)
+ return -ENODEV;
+
+ if (mem_res->start & 0xFFFF)
+ return -EINVAL;
+
+ priv->mem_res = *mem_res;
+ priv->cfg_res = cfg_res;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ priv->reg = reg;
+ priv->dev = dev;
+
+ if (priv->irq < 0) {
+ dev_err(dev, "no valid irq found\n");
+ return priv->irq;
+ }
+
+ bridge->ops = &rcar_pci_ops;
+
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
+
+ rcar_pci_setup(priv);
+
+ return pci_host_probe(bridge);
+}
+
+static const struct of_device_id rcar_pci_of_match[] = {
+ { .compatible = "renesas,pci-r8a7790", },
+ { .compatible = "renesas,pci-r8a7791", },
+ { .compatible = "renesas,pci-r8a7794", },
+ { .compatible = "renesas,pci-rcar-gen2", },
+ { .compatible = "renesas,pci-rzn1", },
+ { },
+};
+
+static struct platform_driver rcar_pci_driver = {
+ .driver = {
+ .name = "pci-rcar-gen2",
+ .suppress_bind_attrs = true,
+ .of_match_table = rcar_pci_of_match,
+ },
+ .probe = rcar_pci_probe,
+};
+builtin_platform_driver(rcar_pci_driver);
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
new file mode 100644
index 000000000..038d974a3
--- /dev/null
+++ b/drivers/pci/controller/pci-tegra.c
@@ -0,0 +1,2811 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for Tegra SoCs
+ *
+ * Copyright (c) 2010, CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on NVIDIA PCIe driver
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * Bits taken from arch/arm/mach-dove/pcie.c
+ *
+ * Author: Thierry Reding <treding@nvidia.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/regulator/consumer.h>
+
+#include <soc/tegra/cpuidle.h>
+#include <soc/tegra/pmc.h>
+
+#include "../pci.h"
+
+#define INT_PCI_MSI_NR (8 * 32)
+
+/* register definitions */
+
+#define AFI_AXI_BAR0_SZ 0x00
+#define AFI_AXI_BAR1_SZ 0x04
+#define AFI_AXI_BAR2_SZ 0x08
+#define AFI_AXI_BAR3_SZ 0x0c
+#define AFI_AXI_BAR4_SZ 0x10
+#define AFI_AXI_BAR5_SZ 0x14
+
+#define AFI_AXI_BAR0_START 0x18
+#define AFI_AXI_BAR1_START 0x1c
+#define AFI_AXI_BAR2_START 0x20
+#define AFI_AXI_BAR3_START 0x24
+#define AFI_AXI_BAR4_START 0x28
+#define AFI_AXI_BAR5_START 0x2c
+
+#define AFI_FPCI_BAR0 0x30
+#define AFI_FPCI_BAR1 0x34
+#define AFI_FPCI_BAR2 0x38
+#define AFI_FPCI_BAR3 0x3c
+#define AFI_FPCI_BAR4 0x40
+#define AFI_FPCI_BAR5 0x44
+
+#define AFI_CACHE_BAR0_SZ 0x48
+#define AFI_CACHE_BAR0_ST 0x4c
+#define AFI_CACHE_BAR1_SZ 0x50
+#define AFI_CACHE_BAR1_ST 0x54
+
+#define AFI_MSI_BAR_SZ 0x60
+#define AFI_MSI_FPCI_BAR_ST 0x64
+#define AFI_MSI_AXI_BAR_ST 0x68
+
+#define AFI_MSI_VEC(x) (0x6c + ((x) * 4))
+#define AFI_MSI_EN_VEC(x) (0x8c + ((x) * 4))
+
+#define AFI_CONFIGURATION 0xac
+#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+#define AFI_CONFIGURATION_CLKEN_OVERRIDE (1 << 31)
+
+#define AFI_FPCI_ERROR_MASKS 0xb0
+
+#define AFI_INTR_MASK 0xb4
+#define AFI_INTR_MASK_INT_MASK (1 << 0)
+#define AFI_INTR_MASK_MSI_MASK (1 << 8)
+
+#define AFI_INTR_CODE 0xb8
+#define AFI_INTR_CODE_MASK 0xf
+#define AFI_INTR_INI_SLAVE_ERROR 1
+#define AFI_INTR_INI_DECODE_ERROR 2
+#define AFI_INTR_TARGET_ABORT 3
+#define AFI_INTR_MASTER_ABORT 4
+#define AFI_INTR_INVALID_WRITE 5
+#define AFI_INTR_LEGACY 6
+#define AFI_INTR_FPCI_DECODE_ERROR 7
+#define AFI_INTR_AXI_DECODE_ERROR 8
+#define AFI_INTR_FPCI_TIMEOUT 9
+#define AFI_INTR_PE_PRSNT_SENSE 10
+#define AFI_INTR_PE_CLKREQ_SENSE 11
+#define AFI_INTR_CLKCLAMP_SENSE 12
+#define AFI_INTR_RDY4PD_SENSE 13
+#define AFI_INTR_P2P_ERROR 14
+
+#define AFI_INTR_SIGNATURE 0xbc
+#define AFI_UPPER_FPCI_ADDRESS 0xc0
+#define AFI_SM_INTR_ENABLE 0xc4
+#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
+#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
+#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
+#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
+#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
+#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
+#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
+#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
+
+#define AFI_AFI_INTR_ENABLE 0xc8
+#define AFI_INTR_EN_INI_SLVERR (1 << 0)
+#define AFI_INTR_EN_INI_DECERR (1 << 1)
+#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
+#define AFI_INTR_EN_TGT_DECERR (1 << 3)
+#define AFI_INTR_EN_TGT_WRERR (1 << 4)
+#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
+#define AFI_INTR_EN_AXI_DECERR (1 << 6)
+#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
+#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
+
+#define AFI_PCIE_PME 0xf0
+
+#define AFI_PCIE_CONFIG 0x0f8
+#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
+#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20)
+#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x) (1 << ((x) + 29))
+#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL (0x7 << 29)
+
+#define AFI_FUSE 0x104
+#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
+
+#define AFI_PEX0_CTRL 0x110
+#define AFI_PEX1_CTRL 0x118
+#define AFI_PEX_CTRL_RST (1 << 0)
+#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
+#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
+#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
+
+#define AFI_PLLE_CONTROL 0x160
+#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
+#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
+
+#define AFI_PEXBIAS_CTRL_0 0x168
+
+#define RP_ECTL_2_R1 0x00000e84
+#define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff
+
+#define RP_ECTL_4_R1 0x00000e8c
+#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK (0xffff << 16)
+#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT 16
+
+#define RP_ECTL_5_R1 0x00000e90
+#define RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK 0xffffffff
+
+#define RP_ECTL_6_R1 0x00000e94
+#define RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK 0xffffffff
+
+#define RP_ECTL_2_R2 0x00000ea4
+#define RP_ECTL_2_R2_RX_CTLE_1C_MASK 0xffff
+
+#define RP_ECTL_4_R2 0x00000eac
+#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK (0xffff << 16)
+#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT 16
+
+#define RP_ECTL_5_R2 0x00000eb0
+#define RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK 0xffffffff
+
+#define RP_ECTL_6_R2 0x00000eb4
+#define RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK 0xffffffff
+
+#define RP_VEND_XP 0x00000f00
+#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_VEND_XP_OPPORTUNISTIC_ACK (1 << 27)
+#define RP_VEND_XP_OPPORTUNISTIC_UPDATEFC (1 << 28)
+#define RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK (0xff << 18)
+
+#define RP_VEND_CTL0 0x00000f44
+#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK (0xf << 12)
+#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH (0x9 << 12)
+
+#define RP_VEND_CTL1 0x00000f48
+#define RP_VEND_CTL1_ERPT (1 << 13)
+
+#define RP_VEND_XP_BIST 0x00000f4c
+#define RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE (1 << 28)
+
+#define RP_VEND_CTL2 0x00000fa8
+#define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
+
+#define RP_PRIV_MISC 0x00000fe0
+#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK (0x7f << 16)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD (0xf << 16)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE (1 << 23)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK (0x7f << 24)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD (0xf << 24)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE (1 << 31)
+
+#define RP_LINK_CONTROL_STATUS 0x00000090
+#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
+#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
+
+#define RP_LINK_CONTROL_STATUS_2 0x000000b0
+
+#define PADS_CTL_SEL 0x0000009c
+
+#define PADS_CTL 0x000000a0
+#define PADS_CTL_IDDQ_1L (1 << 0)
+#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
+#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
+
+#define PADS_PLL_CTL_TEGRA20 0x000000b8
+#define PADS_PLL_CTL_TEGRA30 0x000000b4
+#define PADS_PLL_CTL_RST_B4SM (1 << 1)
+#define PADS_PLL_CTL_LOCKDET (1 << 8)
+#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
+#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
+#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
+#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
+
+#define PADS_REFCLK_CFG0 0x000000c8
+#define PADS_REFCLK_CFG1 0x000000cc
+#define PADS_REFCLK_BIAS 0x000000d0
+
+/*
+ * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
+ * entries, one entry per PCIe port. These field definitions and desired
+ * values aren't in the TRM, but do come from NVIDIA.
+ */
+#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
+#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
+#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
+#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
+
+#define PME_ACK_TIMEOUT 10000
+#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
+
+struct tegra_msi {
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ struct mutex map_lock;
+ spinlock_t mask_lock;
+ void *virt;
+ dma_addr_t phys;
+ int irq;
+};
+
+/* used to differentiate between Tegra SoC generations */
+struct tegra_pcie_port_soc {
+ struct {
+ u8 turnoff_bit;
+ u8 ack_bit;
+ } pme;
+};
+
+struct tegra_pcie_soc {
+ unsigned int num_ports;
+ const struct tegra_pcie_port_soc *ports;
+ unsigned int msi_base_shift;
+ unsigned long afi_pex2_ctrl;
+ u32 pads_pll_ctl;
+ u32 tx_ref_sel;
+ u32 pads_refclk_cfg0;
+ u32 pads_refclk_cfg1;
+ u32 update_fc_threshold;
+ bool has_pex_clkreq_en;
+ bool has_pex_bias_ctrl;
+ bool has_intr_prsnt_sense;
+ bool has_cml_clk;
+ bool has_gen2;
+ bool force_pca_enable;
+ bool program_uphy;
+ bool update_clamp_threshold;
+ bool program_deskew_time;
+ bool update_fc_timer;
+ bool has_cache_bars;
+ struct {
+ struct {
+ u32 rp_ectl_2_r1;
+ u32 rp_ectl_4_r1;
+ u32 rp_ectl_5_r1;
+ u32 rp_ectl_6_r1;
+ u32 rp_ectl_2_r2;
+ u32 rp_ectl_4_r2;
+ u32 rp_ectl_5_r2;
+ u32 rp_ectl_6_r2;
+ } regs;
+ bool enable;
+ } ectl;
+};
+
+struct tegra_pcie {
+ struct device *dev;
+
+ void __iomem *pads;
+ void __iomem *afi;
+ void __iomem *cfg;
+ int irq;
+
+ struct resource cs;
+
+ struct clk *pex_clk;
+ struct clk *afi_clk;
+ struct clk *pll_e;
+ struct clk *cml_clk;
+
+ struct reset_control *pex_rst;
+ struct reset_control *afi_rst;
+ struct reset_control *pcie_xrst;
+
+ bool legacy_phy;
+ struct phy *phy;
+
+ struct tegra_msi msi;
+
+ struct list_head ports;
+ u32 xbar_config;
+
+ struct regulator_bulk_data *supplies;
+ unsigned int num_supplies;
+
+ const struct tegra_pcie_soc *soc;
+ struct dentry *debugfs;
+};
+
+static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
+{
+ return container_of(msi, struct tegra_pcie, msi);
+}
+
+struct tegra_pcie_port {
+ struct tegra_pcie *pcie;
+ struct device_node *np;
+ struct list_head list;
+ struct resource regs;
+ void __iomem *base;
+ unsigned int index;
+ unsigned int lanes;
+
+ struct phy **phys;
+
+ struct gpio_desc *reset_gpio;
+};
+
+static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
+ unsigned long offset)
+{
+ writel(value, pcie->afi + offset);
+}
+
+static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
+{
+ return readl(pcie->afi + offset);
+}
+
+static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
+ unsigned long offset)
+{
+ writel(value, pcie->pads + offset);
+}
+
+static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
+{
+ return readl(pcie->pads + offset);
+}
+
+/*
+ * The configuration space mapping on Tegra is somewhat similar to the ECAM
+ * defined by PCIe. However it deviates a bit in how the 4 bits for extended
+ * register accesses are mapped:
+ *
+ * [27:24] extended register number
+ * [23:16] bus number
+ * [15:11] device number
+ * [10: 8] function number
+ * [ 7: 0] register number
+ *
+ * Mapping the whole extended configuration space would require 256 MiB of
+ * virtual address space, only a small part of which will actually be used.
+ *
+ * To work around this, a 4 KiB region is used to generate the required
+ * configuration transaction with relevant B:D:F and register offset values.
+ * This is achieved by dynamically programming base address and size of
+ * AFI_AXI_BAR used for end point config space mapping to make sure that the
+ * address (access to which generates correct config transaction) falls in
+ * this 4 KiB region.
+ */
+static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
+ unsigned int where)
+{
+ return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
+ (PCI_FUNC(devfn) << 8) | (where & 0xff);
+}
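+
+/*
+ * Worked example (illustrative, editorial addition): bus 1, devfn 0,
+ * extended register 0x104 encodes as
+ *
+ *	tegra_pcie_conf_offset(1, PCI_DEVFN(0, 0), 0x104)
+ *	  == ((0x104 & 0xf00) << 16) | (1 << 16) | (0 << 11) | (0 << 8)
+ *	     | (0x104 & 0xff)
+ *	  == 0x01010004
+ */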
+
+static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct tegra_pcie *pcie = bus->sysdata;
+ void __iomem *addr = NULL;
+
+ if (bus->number == 0) {
+ unsigned int slot = PCI_SLOT(devfn);
+ struct tegra_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ if (port->index + 1 == slot) {
+ addr = port->base + (where & ~3);
+ break;
+ }
+ }
+ } else {
+ unsigned int offset;
+ u32 base;
+
+ offset = tegra_pcie_conf_offset(bus->number, devfn, where);
+
+ /* move 4 KiB window to offset within the FPCI region */
+ base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
+ afi_writel(pcie, base, AFI_FPCI_BAR0);
+
+ /* move to correct offset within the 4 KiB page */
+ addr = pcie->cfg + (offset & (SZ_4K - 1));
+ }
+
+ return addr;
+}
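+
+/*
+ * Continuing the worked example above (illustrative, editorial
+ * addition): offset 0x01010004 is reached by sliding the 4 KiB window
+ * over the FPCI region:
+ *
+ *	base = 0xfe100000 + ((0x01010004 & ~(SZ_4K - 1)) >> 8);	// 0xfe110100
+ *	addr = pcie->cfg + (0x01010004 & (SZ_4K - 1));		// cfg + 0x004
+ */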
+
+static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (bus->number == 0)
+ return pci_generic_config_read32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (bus->number == 0)
+ return pci_generic_config_write32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
+static struct pci_ops tegra_pcie_ops = {
+ .map_bus = tegra_pcie_map_bus,
+ .read = tegra_pcie_config_read,
+ .write = tegra_pcie_config_write,
+};
+
+static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ unsigned long ret = 0;
+
+ switch (port->index) {
+ case 0:
+ ret = AFI_PEX0_CTRL;
+ break;
+
+ case 1:
+ ret = AFI_PEX1_CTRL;
+ break;
+
+ case 2:
+ ret = soc->afi_pex2_ctrl;
+ break;
+ }
+
+ return ret;
+}
+
+static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
+{
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ unsigned long value;
+
+ /* pulse reset signal */
+ if (port->reset_gpio) {
+ gpiod_set_value(port->reset_gpio, 1);
+ } else {
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+ }
+
+ usleep_range(1000, 2000);
+
+ if (port->reset_gpio) {
+ gpiod_set_value(port->reset_gpio, 0);
+ } else {
+ value = afi_readl(port->pcie, ctrl);
+ value |= AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+ }
+}
+
+static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ u32 value;
+
+ /* Enable AER capability */
+ value = readl(port->base + RP_VEND_CTL1);
+ value |= RP_VEND_CTL1_ERPT;
+ writel(value, port->base + RP_VEND_CTL1);
+
+ /* Optimal settings to enhance bandwidth */
+ value = readl(port->base + RP_VEND_XP);
+ value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
+ value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
+ writel(value, port->base + RP_VEND_XP);
+
+	/*
+	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
+	 * to avoid truncation of PM messages, which would result in
+	 * receiver errors.
+	 */
+ value = readl(port->base + RP_VEND_XP_BIST);
+ value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
+ writel(value, port->base + RP_VEND_XP_BIST);
+
+ value = readl(port->base + RP_PRIV_MISC);
+ value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
+ value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
+
+ if (soc->update_clamp_threshold) {
+ value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
+ RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
+ value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
+ RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
+ }
+
+ writel(value, port->base + RP_PRIV_MISC);
+}
+
+static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ u32 value;
+
+ value = readl(port->base + RP_ECTL_2_R1);
+ value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_2_r1;
+ writel(value, port->base + RP_ECTL_2_R1);
+
+ value = readl(port->base + RP_ECTL_4_R1);
+ value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_4_r1 <<
+ RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
+ writel(value, port->base + RP_ECTL_4_R1);
+
+ value = readl(port->base + RP_ECTL_5_R1);
+ value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_5_r1;
+ writel(value, port->base + RP_ECTL_5_R1);
+
+ value = readl(port->base + RP_ECTL_6_R1);
+ value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_6_r1;
+ writel(value, port->base + RP_ECTL_6_R1);
+
+ value = readl(port->base + RP_ECTL_2_R2);
+ value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_2_r2;
+ writel(value, port->base + RP_ECTL_2_R2);
+
+ value = readl(port->base + RP_ECTL_4_R2);
+ value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_4_r2 <<
+ RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
+ writel(value, port->base + RP_ECTL_4_R2);
+
+ value = readl(port->base + RP_ECTL_5_R2);
+ value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_5_r2;
+ writel(value, port->base + RP_ECTL_5_R2);
+
+ value = readl(port->base + RP_ECTL_6_R2);
+ value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_6_r2;
+ writel(value, port->base + RP_ECTL_6_R2);
+}
+
+static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ u32 value;
+
+ /*
+ * Sometimes link speed change from Gen2 to Gen1 fails due to
+ * instability in deskew logic on lane-0. Increase the deskew
+ * retry time to resolve this issue.
+ */
+ if (soc->program_deskew_time) {
+ value = readl(port->base + RP_VEND_CTL0);
+ value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
+ value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
+ writel(value, port->base + RP_VEND_CTL0);
+ }
+
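+ /*
+ * Raise the UpdateFC timer threshold on SoCs that need it; the
+ * threshold value is provided per SoC (see soc->update_fc_threshold).
+ */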
+ if (soc->update_fc_timer) {
+ value = readl(port->base + RP_VEND_XP);
+ value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
+ value |= soc->update_fc_threshold;
+ writel(value, port->base + RP_VEND_XP);
+ }
+
+ /*
+ * On Tegra, the PCIe link doesn't come up with some legacy PCIe
+ * endpoints if the root port advertises both Gen-1 and Gen-2 speeds.
+ * Hence, the strategy followed here is to initially advertise only
+ * Gen-1 and, once the link is up, retrain it to Gen-2 speed.
+ */
+ value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+ value &= ~PCI_EXP_LNKSTA_CLS;
+ value |= PCI_EXP_LNKSTA_CLS_2_5GB;
+ writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
+}
+
+static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
+{
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ unsigned long value;
+
+ /* enable reference clock */
+ value = afi_readl(port->pcie, ctrl);
+ value |= AFI_PEX_CTRL_REFCLK_EN;
+
+ if (soc->has_pex_clkreq_en)
+ value |= AFI_PEX_CTRL_CLKREQ_EN;
+
+ value |= AFI_PEX_CTRL_OVERRIDE_EN;
+
+ afi_writel(port->pcie, value, ctrl);
+
+ tegra_pcie_port_reset(port);
+
+ if (soc->force_pca_enable) {
+ value = readl(port->base + RP_VEND_CTL2);
+ value |= RP_VEND_CTL2_PCA_ENABLE;
+ writel(value, port->base + RP_VEND_CTL2);
+ }
+
+ tegra_pcie_enable_rp_features(port);
+
+ if (soc->ectl.enable)
+ tegra_pcie_program_ectl_settings(port);
+
+ tegra_pcie_apply_sw_fixup(port);
+}
+
+static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
+{
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ unsigned long value;
+
+ /* assert port reset */
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+
+ /* disable reference clock */
+ value = afi_readl(port->pcie, ctrl);
+
+ if (soc->has_pex_clkreq_en)
+ value &= ~AFI_PEX_CTRL_CLKREQ_EN;
+
+ value &= ~AFI_PEX_CTRL_REFCLK_EN;
+ afi_writel(port->pcie, value, ctrl);
+
+ /* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
+ value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
+ value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+ value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
+ afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
+}
+
+static void tegra_pcie_port_free(struct tegra_pcie_port *port)
+{
+ struct tegra_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
+
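+ /* release the devm-managed resources now; ports may be freed long before the device itself goes away */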
+ devm_iounmap(dev, port->base);
+ devm_release_mem_region(dev, port->regs.start,
+ resource_size(&port->regs));
+ list_del(&port->list);
+ devm_kfree(dev, port);
+}
+
+/* Tegra PCIE root complex wrongly reports device class */
+static void tegra_pcie_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
+
+/* Tegra20 and Tegra30 PCIE require relaxed ordering */
+static void tegra_pcie_relax_enable(struct pci_dev *dev)
+{
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
+
+static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
+{
+ struct tegra_pcie *pcie = pdev->bus->sysdata;
+ int irq;
+
+ tegra_cpuidle_pcie_irqs_in_use();
+
+ irq = of_irq_parse_and_map_pci(pdev, slot, pin);
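+ /* fall back to the controller's shared interrupt if the DT provides no mapping */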
+ if (!irq)
+ irq = pcie->irq;
+
+ return irq;
+}
+
+static irqreturn_t tegra_pcie_isr(int irq, void *arg)
+{
+ static const char * const err_msg[] = {
+ "Unknown",
+ "AXI slave error",
+ "AXI decode error",
+ "Target abort",
+ "Master abort",
+ "Invalid write",
+ "Legacy interrupt",
+ "Response decoding error",
+ "AXI response decoding error",
+ "Transaction timeout",
+ "Slot present pin change",
+ "Slot clock request change",
+ "TMS clock ramp change",
+ "TMS ready for power down",
+ "Peer2Peer error",
+ };
+ struct tegra_pcie *pcie = arg;
+ struct device *dev = pcie->dev;
+ u32 code, signature;
+
+ code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
+ signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
+ afi_writel(pcie, 0, AFI_INTR_CODE);
+
+ if (code == AFI_INTR_LEGACY)
+ return IRQ_NONE;
+
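+ /* report unrecognized error codes as "Unknown" */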
+ if (code >= ARRAY_SIZE(err_msg))
+ code = 0;
+
+ /*
+ * do not pollute kernel log with master abort reports since they
+ * happen a lot during enumeration
+ */
+ if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
+ dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
+ else
+ dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
+
+ if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
+ code == AFI_INTR_FPCI_DECODE_ERROR) {
+ u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
+ u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
+
+ if (code == AFI_INTR_MASTER_ABORT)
+ dev_dbg(dev, " FPCI address: %10llx\n", address);
+ else
+ dev_err(dev, " FPCI address: %10llx\n", address);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * FPCI map is as follows:
+ * - 0xfdfc000000: I/O space
+ * - 0xfdfe000000: type 0 configuration space
+ * - 0xfdff000000: type 1 configuration space
+ * - 0xfe00000000: type 0 extended configuration space
+ * - 0xfe10000000: type 1 extended configuration space
+ */
+static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
+{
+ u32 size;
+ struct resource_entry *entry;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+
+ /* Bar 0: type 1 extended configuration space */
+ size = resource_size(&pcie->cs);
+ afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
+
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ u32 fpci_bar, axi_address;
+ struct resource *res = entry->res;
+
+ size = resource_size(res);
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ /* Bar 1: downstream IO bar */
+ fpci_bar = 0xfdfc0000;
+ axi_address = pci_pio_to_address(res->start);
+ afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
+ break;
+ case IORESOURCE_MEM:
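+ /* place AXI address bits [39:12] into bits [31:4] of the FPCI BAR */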
+ fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
+ axi_address = res->start;
+
+ if (res->flags & IORESOURCE_PREFETCH) {
+ /* Bar 2: prefetchable memory BAR */
+ afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
+
+ } else {
+ /* Bar 3: non prefetchable memory BAR */
+ afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
+ }
+ break;
+ }
+ }
+
+ /* NULL out the remaining BARs as they are not used */
+ afi_writel(pcie, 0, AFI_AXI_BAR4_START);
+ afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
+ afi_writel(pcie, 0, AFI_FPCI_BAR4);
+
+ afi_writel(pcie, 0, AFI_AXI_BAR5_START);
+ afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
+ afi_writel(pcie, 0, AFI_FPCI_BAR5);
+
+ if (pcie->soc->has_cache_bars) {
+ /* map all upstream transactions as uncached */
+ afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+ }
+
+ /* MSI translations are set up only when needed */
+ afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
+ afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
+ afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
+ afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
+}
+
+static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ if (value & PADS_PLL_CTL_LOCKDET)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ u32 value;
+ int err;
+
+ /* initialize internal PHY, enable up to 16 PCIE lanes */
+ pads_writel(pcie, 0x0, PADS_CTL_SEL);
+
+ /* override IDDQ to 1 on all 4 lanes */
+ value = pads_readl(pcie, PADS_CTL);
+ value |= PADS_CTL_IDDQ_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /*
+ * Set up the PHY PLL inputs: select the PLLE output as refclock
+ * and set TX ref sel to div10 (not div5).
+ */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
+ value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ /* reset PLL */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value &= ~PADS_PLL_CTL_RST_B4SM;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ usleep_range(20, 100);
+
+ /* take PLL out of reset */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value |= PADS_PLL_CTL_RST_B4SM;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ /* wait for the PLL to lock */
+ err = tegra_pcie_pll_wait(pcie, 500);
+ if (err < 0) {
+ dev_err(dev, "PLL failed to lock: %d\n", err);
+ return err;
+ }
+
+ /* turn off IDDQ override */
+ value = pads_readl(pcie, PADS_CTL);
+ value &= ~PADS_CTL_IDDQ_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /* enable TX/RX data */
+ value = pads_readl(pcie, PADS_CTL);
+ value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ return 0;
+}
+
+static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ u32 value;
+
+ /* disable TX/RX data */
+ value = pads_readl(pcie, PADS_CTL);
+ value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
+ pads_writel(pcie, value, PADS_CTL);
+
+ /* override IDDQ */
+ value = pads_readl(pcie, PADS_CTL);
+ value |= PADS_CTL_IDDQ_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /* reset PLL */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value &= ~PADS_PLL_CTL_RST_B4SM;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
+
+static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
+{
+ struct device *dev = port->pcie->dev;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < port->lanes; i++) {
+ err = phy_power_on(port->phys[i]);
+ if (err < 0) {
+ dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
+{
+ struct device *dev = port->pcie->dev;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < port->lanes; i++) {
+ err = phy_power_off(port->phys[i]);
+ if (err < 0) {
+ dev_err(dev, "failed to power off PHY#%u: %d\n", i,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct tegra_pcie_port *port;
+ int err;
+
+ if (pcie->legacy_phy) {
+ if (pcie->phy)
+ err = phy_power_on(pcie->phy);
+ else
+ err = tegra_pcie_phy_enable(pcie);
+
+ if (err < 0)
+ dev_err(dev, "failed to power on PHY: %d\n", err);
+
+ return err;
+ }
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ err = tegra_pcie_port_phy_power_on(port);
+ if (err < 0) {
+ dev_err(dev,
+ "failed to power on PCIe port %u PHY: %d\n",
+ port->index, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct tegra_pcie_port *port;
+ int err;
+
+ if (pcie->legacy_phy) {
+ if (pcie->phy)
+ err = phy_power_off(pcie->phy);
+ else
+ err = tegra_pcie_phy_disable(pcie);
+
+ if (err < 0)
+ dev_err(dev, "failed to power off PHY: %d\n", err);
+
+ return err;
+ }
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ err = tegra_pcie_port_phy_power_off(port);
+ if (err < 0) {
+ dev_err(dev,
+ "failed to power off PCIe port %u PHY: %d\n",
+ port->index, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ struct tegra_pcie_port *port;
+ unsigned long value;
+
+ /* enable PLL power down */
+ if (pcie->phy) {
+ value = afi_readl(pcie, AFI_PLLE_CONTROL);
+ value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
+ value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
+ afi_writel(pcie, value, AFI_PLLE_CONTROL);
+ }
+
+ /* power down PCIe slot clock bias pad */
+ if (soc->has_pex_bias_ctrl)
+ afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+
+ /* configure mode and disable all ports */
+ value = afi_readl(pcie, AFI_PCIE_CONFIG);
+ value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
+ value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+ value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+ value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
+ }
+
+ afi_writel(pcie, value, AFI_PCIE_CONFIG);
+
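+ /* allow Gen2 link operation only on SoCs that support it */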
+ if (soc->has_gen2) {
+ value = afi_readl(pcie, AFI_FUSE);
+ value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
+ afi_writel(pcie, value, AFI_FUSE);
+ } else {
+ value = afi_readl(pcie, AFI_FUSE);
+ value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
+ afi_writel(pcie, value, AFI_FUSE);
+ }
+
+ /* Disable AFI dynamic clock gating and enable PCIe */
+ value = afi_readl(pcie, AFI_CONFIGURATION);
+ value |= AFI_CONFIGURATION_EN_FPCI;
+ value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
+ afi_writel(pcie, value, AFI_CONFIGURATION);
+
+ value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
+ AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
+ AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
+
+ if (soc->has_intr_prsnt_sense)
+ value |= AFI_INTR_EN_PRSNT_SENSE;
+
+ afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
+ afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
+
+ /* don't enable MSI for now, only when needed */
+ afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
+
+ /* disable all exceptions */
+ afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
+}
+
+static void tegra_pcie_power_off(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ int err;
+
+ reset_control_assert(pcie->afi_rst);
+
+ clk_disable_unprepare(pcie->pll_e);
+ if (soc->has_cml_clk)
+ clk_disable_unprepare(pcie->cml_clk);
+ clk_disable_unprepare(pcie->afi_clk);
+
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+
+ err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
+ if (err < 0)
+ dev_warn(dev, "failed to disable regulators: %d\n", err);
+}
+
+static int tegra_pcie_power_on(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ int err;
+
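+ /* hold the controller in reset while power and clocks are brought up */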
+ reset_control_assert(pcie->pcie_xrst);
+ reset_control_assert(pcie->afi_rst);
+ reset_control_assert(pcie->pex_rst);
+
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+
+ /* enable regulators */
+ err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
+ if (err < 0)
+ dev_err(dev, "failed to enable regulators: %d\n", err);
+
+ if (!dev->pm_domain) {
+ err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
+ if (err) {
+ dev_err(dev, "failed to power ungate: %d\n", err);
+ goto regulator_disable;
+ }
+ err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
+ if (err) {
+ dev_err(dev, "failed to remove clamp: %d\n", err);
+ goto powergate;
+ }
+ }
+
+ err = clk_prepare_enable(pcie->afi_clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable AFI clock: %d\n", err);
+ goto powergate;
+ }
+
+ if (soc->has_cml_clk) {
+ err = clk_prepare_enable(pcie->cml_clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable CML clock: %d\n", err);
+ goto disable_afi_clk;
+ }
+ }
+
+ err = clk_prepare_enable(pcie->pll_e);
+ if (err < 0) {
+ dev_err(dev, "failed to enable PLLE clock: %d\n", err);
+ goto disable_cml_clk;
+ }
+
+ reset_control_deassert(pcie->afi_rst);
+
+ return 0;
+
+disable_cml_clk:
+ if (soc->has_cml_clk)
+ clk_disable_unprepare(pcie->cml_clk);
+disable_afi_clk:
+ clk_disable_unprepare(pcie->afi_clk);
+powergate:
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+regulator_disable:
+ regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
+
+ return err;
+}
+
+static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+
+ /* Configure the reference clock driver */
+ pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
+
+ if (soc->num_ports > 2)
+ pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
+}
+
+static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
+
+ pcie->pex_clk = devm_clk_get(dev, "pex");
+ if (IS_ERR(pcie->pex_clk))
+ return PTR_ERR(pcie->pex_clk);
+
+ pcie->afi_clk = devm_clk_get(dev, "afi");
+ if (IS_ERR(pcie->afi_clk))
+ return PTR_ERR(pcie->afi_clk);
+
+ pcie->pll_e = devm_clk_get(dev, "pll_e");
+ if (IS_ERR(pcie->pll_e))
+ return PTR_ERR(pcie->pll_e);
+
+ if (soc->has_cml_clk) {
+ pcie->cml_clk = devm_clk_get(dev, "cml");
+ if (IS_ERR(pcie->cml_clk))
+ return PTR_ERR(pcie->cml_clk);
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+
+ pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
+ if (IS_ERR(pcie->pex_rst))
+ return PTR_ERR(pcie->pex_rst);
+
+ pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
+ if (IS_ERR(pcie->afi_rst))
+ return PTR_ERR(pcie->afi_rst);
+
+ pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
+ if (IS_ERR(pcie->pcie_xrst))
+ return PTR_ERR(pcie->pcie_xrst);
+
+ return 0;
+}
+
+static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ int err;
+
+ pcie->phy = devm_phy_optional_get(dev, "pcie");
+ if (IS_ERR(pcie->phy)) {
+ err = PTR_ERR(pcie->phy);
+ dev_err(dev, "failed to get PHY: %d\n", err);
+ return err;
+ }
+
+ err = phy_init(pcie->phy);
+ if (err < 0) {
+ dev_err(dev, "failed to initialize PHY: %d\n", err);
+ return err;
+ }
+
+ pcie->legacy_phy = true;
+
+ return 0;
+}
+
+static struct phy *devm_of_phy_optional_get_index(struct device *dev,
+ struct device_node *np,
+ const char *consumer,
+ unsigned int index)
+{
+ struct phy *phy;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
+
+ phy = devm_of_phy_optional_get(dev, np, name);
+ kfree(name);
+
+ return phy;
+}
+
+static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
+{
+ struct device *dev = port->pcie->dev;
+ struct phy *phy;
+ unsigned int i;
+ int err;
+
+ port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
+ if (!port->phys)
+ return -ENOMEM;
+
+ for (i = 0; i < port->lanes; i++) {
+ phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to get PHY#%u: %ld\n", i,
+ PTR_ERR(phy));
+ return PTR_ERR(phy);
+ }
+
+ err = phy_init(phy);
+ if (err < 0) {
+ dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
+ err);
+ return err;
+ }
+
+ port->phys[i] = phy;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ struct device_node *np = pcie->dev->of_node;
+ struct tegra_pcie_port *port;
+ int err;
+
+ if (!soc->has_gen2 || of_property_present(np, "phys"))
+ return tegra_pcie_phys_get_legacy(pcie);
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ err = tegra_pcie_port_get_phys(port);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
+{
+ struct tegra_pcie_port *port;
+ struct device *dev = pcie->dev;
+ int err, i;
+
+ if (pcie->legacy_phy) {
+ err = phy_exit(pcie->phy);
+ if (err < 0)
+ dev_err(dev, "failed to teardown PHY: %d\n", err);
+ return;
+ }
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ for (i = 0; i < port->lanes; i++) {
+ err = phy_exit(port->phys[i]);
+ if (err < 0)
+ dev_err(dev, "failed to teardown PHY#%u: %d\n",
+ i, err);
+ }
+ }
+}
+
+static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ int err;
+
+ err = tegra_pcie_clocks_get(pcie);
+ if (err) {
+ dev_err(dev, "failed to get clocks: %d\n", err);
+ return err;
+ }
+
+ err = tegra_pcie_resets_get(pcie);
+ if (err) {
+ dev_err(dev, "failed to get resets: %d\n", err);
+ return err;
+ }
+
+ if (soc->program_uphy) {
+ err = tegra_pcie_phys_get(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to get PHYs: %d\n", err);
+ return err;
+ }
+ }
+
+ pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
+ if (IS_ERR(pcie->pads)) {
+ err = PTR_ERR(pcie->pads);
+ goto phys_put;
+ }
+
+ pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
+ if (IS_ERR(pcie->afi)) {
+ err = PTR_ERR(pcie->afi);
+ goto phys_put;
+ }
+
+ /* request configuration space, but remap later, on demand */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
+ if (!res) {
+ err = -EADDRNOTAVAIL;
+ goto phys_put;
+ }
+
+ pcie->cs = *res;
+
+ /* constrain configuration space to 4 KiB */
+ pcie->cs.end = pcie->cs.start + SZ_4K - 1;
+
+ pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
+ if (IS_ERR(pcie->cfg)) {
+ err = PTR_ERR(pcie->cfg);
+ goto phys_put;
+ }
+
+ /* request interrupt */
+ err = platform_get_irq_byname(pdev, "intr");
+ if (err < 0)
+ goto phys_put;
+
+ pcie->irq = err;
+
+ err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
+ if (err) {
+ dev_err(dev, "failed to register IRQ: %d\n", err);
+ goto phys_put;
+ }
+
+ return 0;
+
+phys_put:
+ if (soc->program_uphy)
+ tegra_pcie_phys_put(pcie);
+
+ return err;
+}
+
+static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+
+ if (pcie->irq > 0)
+ free_irq(pcie->irq, pcie);
+
+ if (soc->program_uphy)
+ tegra_pcie_phys_put(pcie);
+
+ return 0;
+}
+
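+/* initiate the PME turn-off handshake and wait for the port to acknowledge it */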
+static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
+{
+ struct tegra_pcie *pcie = port->pcie;
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ int err;
+ u32 val;
+ u8 ack_bit;
+
+ val = afi_readl(pcie, AFI_PCIE_PME);
+ val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
+ afi_writel(pcie, val, AFI_PCIE_PME);
+
+ ack_bit = soc->ports[port->index].pme.ack_bit;
+ err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
+ val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
+ if (err)
+ dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
+ port->index);
+
+ usleep_range(10000, 11000);
+
+ val = afi_readl(pcie, AFI_PCIE_PME);
+ val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
+ afi_writel(pcie, val, AFI_PCIE_PME);
+}
+
+static void tegra_pcie_msi_irq(struct irq_desc *desc)
+{
+ struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct tegra_msi *msi = &pcie->msi;
+ struct device *dev = pcie->dev;
+ unsigned int i;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < 8; i++) {
+ unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));
+
+ while (reg) {
+ unsigned int offset = find_first_bit(&reg, 32);
+ unsigned int index = i * 32 + offset;
+ int ret;
+
+ ret = generic_handle_domain_irq(msi->domain->parent, index);
+ if (ret) {
+ /*
+ * That's weird: who triggered this?
+ * Just clear it.
+ */
+ dev_info(dev, "unexpected MSI\n");
+ afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index));
+ }
+
+ /* see if there's any more pending in this vector */
+ reg = afi_readl(pcie, AFI_MSI_VEC(i));
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void tegra_msi_top_irq_ack(struct irq_data *d)
+{
+ irq_chip_ack_parent(d);
+}
+
+static void tegra_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void tegra_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip tegra_msi_top_chip = {
+ .name = "Tegra PCIe MSI",
+ .irq_ack = tegra_msi_top_irq_ack,
+ .irq_mask = tegra_msi_top_irq_mask,
+ .irq_unmask = tegra_msi_top_irq_unmask,
+};
+
+static void tegra_msi_irq_ack(struct irq_data *d)
+{
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ unsigned int index = d->hwirq / 32;
+
+ /* clear the interrupt */
+ afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
+}
+
+static void tegra_msi_irq_mask(struct irq_data *d)
+{
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ unsigned int index = d->hwirq / 32;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value &= ~BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
+}
+
+static void tegra_msi_irq_unmask(struct irq_data *d)
+{
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ unsigned int index = d->hwirq / 32;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value |= BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
+}
+
+static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+{
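+ /*
+ * All MSI vectors funnel into a single chained parent interrupt, so
+ * per-vector CPU affinity cannot be changed.
+ */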
+ return -EINVAL;
+}
+
+static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
+
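+ /* all vectors share one MSI target address; the payload encodes the hwirq */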
+ msg->address_lo = lower_32_bits(msi->phys);
+ msg->address_hi = upper_32_bits(msi->phys);
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip tegra_msi_bottom_chip = {
+ .name = "Tegra MSI",
+ .irq_ack = tegra_msi_irq_ack,
+ .irq_mask = tegra_msi_irq_mask,
+ .irq_unmask = tegra_msi_irq_unmask,
+ .irq_set_affinity = tegra_msi_set_affinity,
+ .irq_compose_msi_msg = tegra_compose_msi_msg,
+};
+
+static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct tegra_msi *msi = domain->host_data;
+ unsigned int i;
+ int hwirq;
+
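+ /* reserve a naturally aligned, power-of-two sized block of vectors */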
+ mutex_lock(&msi->map_lock);
+
+ hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
+
+ mutex_unlock(&msi->map_lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &tegra_msi_bottom_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
+
+ tegra_cpuidle_pcie_irqs_in_use();
+
+ return 0;
+}
+
+static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct tegra_msi *msi = domain->host_data;
+
+ mutex_lock(&msi->map_lock);
+
+ bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
+
+ mutex_unlock(&msi->map_lock);
+}
+
+static const struct irq_domain_ops tegra_msi_domain_ops = {
+ .alloc = tegra_msi_domain_alloc,
+ .free = tegra_msi_domain_free,
+};
+
+static struct msi_domain_info tegra_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &tegra_msi_top_chip,
+};
+
+static int tegra_allocate_domains(struct tegra_msi *msi)
+{
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain *parent;
+
+ parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
+ &tegra_msi_domain_ops, msi);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
+ if (!msi->domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void tegra_free_domains(struct tegra_msi *msi)
+{
+ struct irq_domain *parent = msi->domain->parent;
+
+ irq_domain_remove(msi->domain);
+ irq_domain_remove(parent);
+}
+
+static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct tegra_msi *msi = &pcie->msi;
+ struct device *dev = pcie->dev;
+ int err;
+
+ mutex_init(&msi->map_lock);
+ spin_lock_init(&msi->mask_lock);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = tegra_allocate_domains(msi);
+ if (err)
+ return err;
+ }
+
+ err = platform_get_irq_byname(pdev, "msi");
+ if (err < 0)
+ goto free_irq_domain;
+
+ msi->irq = err;
+
+ irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);
+
+ /*
+ * Though the PCIe controller can address a >32-bit address space, to
+ * facilitate endpoints that support only a 32-bit MSI target address,
+ * the DMA mask is set to 32 bits to make sure that the MSI target
+ * address is always a 32-bit address.
+ */
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err < 0) {
+ dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
+ goto free_irq;
+ }
+
+ msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!msi->virt) {
+ dev_err(dev, "failed to allocate DMA memory for MSI\n");
+ err = -ENOMEM;
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
+free_irq_domain:
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_free_domains(msi);
+
+ return err;
+}
+
+static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ struct tegra_msi *msi = &pcie->msi;
+ u32 reg, msi_state[INT_PCI_MSI_NR / 32];
+ int i;
+
+ afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
+ afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
+ /* this register is in 4K increments */
+ afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
+
+ /* Restore the MSI allocation state */
+ bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
+ for (i = 0; i < ARRAY_SIZE(msi_state); i++)
+ afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
+
+ /* and unmask the MSI interrupt */
+ reg = afi_readl(pcie, AFI_INTR_MASK);
+ reg |= AFI_INTR_MASK_MSI_MASK;
+ afi_writel(pcie, reg, AFI_INTR_MASK);
+}
+
+static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
+{
+ struct tegra_msi *msi = &pcie->msi;
+ unsigned int i, irq;
+
+ dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+
+ for (i = 0; i < INT_PCI_MSI_NR; i++) {
+ irq = irq_find_mapping(msi->domain, i);
+ if (irq > 0)
+ irq_domain_free_irqs(irq, 1);
+ }
+
+ irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_free_domains(msi);
+}
+
+static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
+{
+ u32 value;
+
+ /* mask the MSI interrupt */
+ value = afi_readl(pcie, AFI_INTR_MASK);
+ value &= ~AFI_INTR_MASK_MSI_MASK;
+ afi_writel(pcie, value, AFI_INTR_MASK);
+
+ return 0;
+}
+
+static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
+{
+ u32 value;
+
+ value = afi_readl(pcie, AFI_INTR_MASK);
+ value &= ~AFI_INTR_MASK_INT_MASK;
+ afi_writel(pcie, value, AFI_INTR_MASK);
+}
+
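+/*
+ * @lanes packs the lane count of each root port into one byte per port, as
+ * assembled in tegra_pcie_parse_dt(); translate that encoding into the
+ * matching XBAR configuration for this SoC generation.
+ */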
+static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
+ u32 *xbar)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
+
+ if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
+ switch (lanes) {
+ case 0x010004:
+ dev_info(dev, "4x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
+ return 0;
+
+ case 0x010102:
+ dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
+ return 0;
+
+ case 0x010101:
+ dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
+ return 0;
+
+ default:
+ dev_info(dev, "wrong configuration updated in DT, "
+ "switching to default 2x1, 1x1, 1x1 "
+ "configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
+ return 0;
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
+ of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ switch (lanes) {
+ case 0x0000104:
+ dev_info(dev, "4x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
+ return 0;
+
+ case 0x0000102:
+ dev_info(dev, "2x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
+ return 0;
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+ switch (lanes) {
+ case 0x00000204:
+ dev_info(dev, "4x1, 2x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
+ return 0;
+
+ case 0x00020202:
+ dev_info(dev, "2x3 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
+ return 0;
+
+ case 0x00010104:
+ dev_info(dev, "4x1, 1x2 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
+ return 0;
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
+ switch (lanes) {
+ case 0x00000004:
+ dev_info(dev, "single-mode configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
+ return 0;
+
+ case 0x00000202:
+ dev_info(dev, "dual-mode configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Check whether a given set of supplies is available in a device tree node.
+ * This is used to check whether the new or the legacy device tree bindings
+ * should be used.
+ */
+static bool of_regulator_bulk_available(struct device_node *np,
+ struct regulator_bulk_data *supplies,
+ unsigned int num_supplies)
+{
+ char property[32];
+ unsigned int i;
+
+ for (i = 0; i < num_supplies; i++) {
+ snprintf(property, 32, "%s-supply", supplies[i].supply);
+
+ if (!of_property_present(np, property))
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Old versions of the device tree binding for this device used a set of power
+ * supplies that didn't match the hardware inputs. This happened to work for a
+ * number of cases but is not future-proof. However, to preserve backwards
+ * compatibility with old device trees, this function will try to use the old
+ * set of supplies.
+ */
+static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
+
+ if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
+ pcie->num_supplies = 3;
+ else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
+ pcie->num_supplies = 2;
+
+ if (pcie->num_supplies == 0) {
+ dev_err(dev, "device %pOF not supported in legacy mode\n", np);
+ return -ENODEV;
+ }
+
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[0].supply = "pex-clk";
+ pcie->supplies[1].supply = "vdd";
+
+ if (pcie->num_supplies > 2)
+ pcie->supplies[2].supply = "avdd";
+
+ return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
+}
+
+/*
+ * Obtains the list of regulators required for a particular generation of the
+ * IP block.
+ *
+ * This would've been nice to do simply by providing static tables for use
+ * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
+ * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB),
+ * and either pair seems to be optional depending on which ports are used.
+ */
+static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int i = 0;
+
+ if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
+ pcie->num_supplies = 4;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "dvdd-pex";
+ pcie->supplies[i++].supply = "hvdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "vddio-pexctl-aud";
+ } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ pcie->num_supplies = 3;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "hvddio-pex";
+ pcie->supplies[i++].supply = "dvddio-pex";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ pcie->num_supplies = 4;
+
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "avddio-pex";
+ pcie->supplies[i++].supply = "dvddio-pex";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+ bool need_pexa = false, need_pexb = false;
+
+ /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
+ if (lane_mask & 0x0f)
+ need_pexa = true;
+
+ /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
+ if (lane_mask & 0x30)
+ need_pexb = true;
+
+ pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
+ (need_pexb ? 2 : 0);
+
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "avdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ pcie->supplies[i++].supply = "avdd-plle";
+
+ if (need_pexa) {
+ pcie->supplies[i++].supply = "avdd-pexa";
+ pcie->supplies[i++].supply = "vdd-pexa";
+ }
+
+ if (need_pexb) {
+ pcie->supplies[i++].supply = "avdd-pexb";
+ pcie->supplies[i++].supply = "vdd-pexb";
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
+ pcie->num_supplies = 5;
+
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[0].supply = "avdd-pex";
+ pcie->supplies[1].supply = "vdd-pex";
+ pcie->supplies[2].supply = "avdd-pex-pll";
+ pcie->supplies[3].supply = "avdd-plle";
+ pcie->supplies[4].supply = "vddio-pex-clk";
+ }
+
+ if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
+ pcie->num_supplies))
+ return devm_regulator_bulk_get(dev, pcie->num_supplies,
+ pcie->supplies);
+
+ /*
+ * If not all regulators are available for this new scheme, assume
+ * that the device tree complies with an older version of the device
+ * tree binding.
+ */
+ dev_info(dev, "using legacy DT binding for power supplies\n");
+
+ devm_kfree(dev, pcie->supplies);
+ pcie->num_supplies = 0;
+
+ return tegra_pcie_get_legacy_regulators(pcie);
+}
+
+static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node, *port;
+ const struct tegra_pcie_soc *soc = pcie->soc;
+ u32 lanes = 0, mask = 0;
+ unsigned int lane = 0;
+ int err;
+
+ /* parse root ports */
+ for_each_child_of_node(np, port) {
+ struct tegra_pcie_port *rp;
+ unsigned int index;
+ u32 value;
+ char *label;
+
+ err = of_pci_get_devfn(port);
+ if (err < 0) {
+ dev_err(dev, "failed to parse address: %d\n", err);
+ goto err_node_put;
+ }
+
+ index = PCI_SLOT(err);
+
+ if (index < 1 || index > soc->num_ports) {
+ dev_err(dev, "invalid port number: %d\n", index);
+ err = -EINVAL;
+ goto err_node_put;
+ }
+
+ index--;
+
+ err = of_property_read_u32(port, "nvidia,num-lanes", &value);
+ if (err < 0) {
+ dev_err(dev, "failed to parse # of lanes: %d\n",
+ err);
+ goto err_node_put;
+ }
+
+ if (value > 16) {
+ dev_err(dev, "invalid # of lanes: %u\n", value);
+ err = -EINVAL;
+ goto err_node_put;
+ }
+
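+ /* pack this port's lane count into its byte of the XBAR lane encoding */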
+ lanes |= value << (index << 3);
+
+ if (!of_device_is_available(port)) {
+ lane += value;
+ continue;
+ }
+
+ mask |= ((1 << value) - 1) << lane;
+ lane += value;
+
+ rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
+ if (!rp) {
+ err = -ENOMEM;
+ goto err_node_put;
+ }
+
+ err = of_address_to_resource(port, 0, &rp->regs);
+ if (err < 0) {
+ dev_err(dev, "failed to parse address: %d\n", err);
+ goto err_node_put;
+ }
+
+ INIT_LIST_HEAD(&rp->list);
+ rp->index = index;
+ rp->lanes = value;
+ rp->pcie = pcie;
+ rp->np = port;
+
+ rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
+ if (IS_ERR(rp->base)) {
+ err = PTR_ERR(rp->base);
+ goto err_node_put;
+ }
+
+ label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
+ if (!label) {
+ err = -ENOMEM;
+ goto err_node_put;
+ }
+
+ /*
+ * Returns -ENOENT if reset-gpios property is not populated
+ * and in this case fall back to using AFI per port register
+ * to toggle PERST# SFIO line.
+ */
+ rp->reset_gpio = devm_fwnode_gpiod_get(dev,
+ of_fwnode_handle(port),
+ "reset",
+ GPIOD_OUT_LOW,
+ label);
+ if (IS_ERR(rp->reset_gpio)) {
+ if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+ rp->reset_gpio = NULL;
+ } else {
+ dev_err(dev, "failed to get reset GPIO: %ld\n",
+ PTR_ERR(rp->reset_gpio));
+ err = PTR_ERR(rp->reset_gpio);
+ goto err_node_put;
+ }
+ }
+
+ list_add_tail(&rp->list, &pcie->ports);
+ }
+
+ err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
+ if (err < 0) {
+ dev_err(dev, "invalid lane configuration\n");
+ return err;
+ }
+
+ err = tegra_pcie_get_regulators(pcie, mask);
+ if (err < 0)
+ return err;
+
+ return 0;
+
+err_node_put:
+ of_node_put(port);
+ return err;
+}
+
+/*
+ * FIXME: If there are no PCIe cards attached, then calling this function
+ * can significantly increase boot time because of its long timeout loops.
+ */
+#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
+static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
+{
+ struct device *dev = port->pcie->dev;
+ unsigned int retries = 3;
+ unsigned long value;
+
+ /* override presence detection */
+ value = readl(port->base + RP_PRIV_MISC);
+ value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
+ value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
+ writel(value, port->base + RP_PRIV_MISC);
+
+ do {
+ unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
+
+ do {
+ value = readl(port->base + RP_VEND_XP);
+
+ if (value & RP_VEND_XP_DL_UP)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--timeout);
+
+ if (!timeout) {
+ dev_dbg(dev, "link %u down, retrying\n", port->index);
+ goto retry;
+ }
+
+ timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
+
+ do {
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+
+ if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+ return true;
+
+ usleep_range(1000, 2000);
+ } while (--timeout);
+
+retry:
+ tegra_pcie_port_reset(port);
+ } while (--retries);
+
+ return false;
+}
+
+static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct tegra_pcie_port *port;
+ ktime_t deadline;
+ u32 value;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ /*
+ * "Supported Link Speeds Vector" in "Link Capabilities 2"
+ * is not supported by Tegra. tegra_pcie_change_link_speed()
+ * is called only for Tegra chips which support Gen2.
+ * So there is no harm if the supported link speed is not verified.
+ */
+ value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+ value &= ~PCI_EXP_LNKSTA_CLS;
+ value |= PCI_EXP_LNKSTA_CLS_5_0GB;
+ writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
+
+ /*
+ * Poll until link comes back from recovery to avoid race
+ * condition.
+ */
+ deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
+
+ while (ktime_before(ktime_get(), deadline)) {
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+ if ((value & PCI_EXP_LNKSTA_LT) == 0)
+ break;
+
+ usleep_range(2000, 3000);
+ }
+
+ if (value & PCI_EXP_LNKSTA_LT)
+ dev_warn(dev, "PCIe port %u link is in recovery\n",
+ port->index);
+
+ /* Retrain the link */
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+ value |= PCI_EXP_LNKCTL_RL;
+ writel(value, port->base + RP_LINK_CONTROL_STATUS);
+
+ deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
+
+ while (ktime_before(ktime_get(), deadline)) {
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+ if ((value & PCI_EXP_LNKSTA_LT) == 0)
+ break;
+
+ usleep_range(2000, 3000);
+ }
+
+ if (value & PCI_EXP_LNKSTA_LT)
+ dev_err(dev, "failed to retrain link of port %u\n",
+ port->index);
+ }
+}
+
+static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct tegra_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ dev_info(dev, "probing port %u, using %u lanes\n",
+ port->index, port->lanes);
+
+ tegra_pcie_port_enable(port);
+ }
+
+ /* Start LTSSM from Tegra side */
+ reset_control_deassert(pcie->pcie_xrst);
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ if (tegra_pcie_port_check_link(port))
+ continue;
+
+ dev_info(dev, "link %u down, ignoring\n", port->index);
+
+ tegra_pcie_port_disable(port);
+ tegra_pcie_port_free(port);
+ }
+
+ if (pcie->soc->has_gen2)
+ tegra_pcie_change_link_speed(pcie);
+}
+
+static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
+{
+ struct tegra_pcie_port *port, *tmp;
+
+ reset_control_assert(pcie->pcie_xrst);
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ tegra_pcie_port_disable(port);
+}
+
+static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
+ { .pme.turnoff_bit = 0, .pme.ack_bit = 5 },
+ { .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
+};
+
+static const struct tegra_pcie_soc tegra20_pcie = {
+ .num_ports = 2,
+ .ports = tegra20_pcie_ports,
+ .msi_base_shift = 0,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
+ .pads_refclk_cfg0 = 0xfa5cfa5c,
+ .has_pex_clkreq_en = false,
+ .has_pex_bias_ctrl = false,
+ .has_intr_prsnt_sense = false,
+ .has_cml_clk = false,
+ .has_gen2 = false,
+ .force_pca_enable = false,
+ .program_uphy = true,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+ .update_fc_timer = false,
+ .has_cache_bars = true,
+ .ectl.enable = false,
+};
+
+static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
+ { .pme.turnoff_bit = 0, .pme.ack_bit = 5 },
+ { .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
+ { .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
+};
+
+static const struct tegra_pcie_soc tegra30_pcie = {
+ .num_ports = 3,
+ .ports = tegra30_pcie_ports,
+ .msi_base_shift = 8,
+ .afi_pex2_ctrl = 0x128,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0xfa5cfa5c,
+ .pads_refclk_cfg1 = 0xfa5cfa5c,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = true,
+ .has_gen2 = false,
+ .force_pca_enable = false,
+ .program_uphy = true,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
+};
+
+static const struct tegra_pcie_soc tegra124_pcie = {
+ .num_ports = 2,
+ .ports = tegra20_pcie_ports,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x44ac44ac,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = true,
+ .has_gen2 = true,
+ .force_pca_enable = false,
+ .program_uphy = true,
+ .update_clamp_threshold = true,
+ .program_deskew_time = false,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
+};
+
+static const struct tegra_pcie_soc tegra210_pcie = {
+ .num_ports = 2,
+ .ports = tegra20_pcie_ports,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x90b890b8,
+ /* FC threshold is bit[25:18] */
+ .update_fc_threshold = 0x01800000,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = true,
+ .has_gen2 = true,
+ .force_pca_enable = true,
+ .program_uphy = true,
+ .update_clamp_threshold = true,
+ .program_deskew_time = true,
+ .update_fc_timer = true,
+ .has_cache_bars = false,
+ .ectl = {
+ .regs = {
+ .rp_ectl_2_r1 = 0x0000000f,
+ .rp_ectl_4_r1 = 0x00000067,
+ .rp_ectl_5_r1 = 0x55010000,
+ .rp_ectl_6_r1 = 0x00000001,
+ .rp_ectl_2_r2 = 0x0000008f,
+ .rp_ectl_4_r2 = 0x000000c7,
+ .rp_ectl_5_r2 = 0x55010000,
+ .rp_ectl_6_r2 = 0x00000001,
+ },
+ .enable = true,
+ },
+};
+
+static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
+ { .pme.turnoff_bit = 0, .pme.ack_bit = 5 },
+ { .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
+ { .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
+};
+
+static const struct tegra_pcie_soc tegra186_pcie = {
+ .num_ports = 3,
+ .ports = tegra186_pcie_ports,
+ .msi_base_shift = 8,
+ .afi_pex2_ctrl = 0x19c,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x80b880b8,
+ .pads_refclk_cfg1 = 0x000480b8,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = false,
+ .has_gen2 = true,
+ .force_pca_enable = false,
+ .program_uphy = false,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
+};
+
+static const struct of_device_id tegra_pcie_of_match[] = {
+ { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
+ { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
+ { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
+ { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
+ { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
+
+static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct tegra_pcie *pcie = s->private;
+
+ if (list_empty(&pcie->ports))
+ return NULL;
+
+ seq_puts(s, "Index Status\n");
+
+ return seq_list_start(&pcie->ports, *pos);
+}
+
+static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct tegra_pcie *pcie = s->private;
+
+ return seq_list_next(v, &pcie->ports, pos);
+}
+
+static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
+{
+ bool up = false, active = false;
+ struct tegra_pcie_port *port;
+ unsigned int value;
+
+ port = list_entry(v, struct tegra_pcie_port, list);
+
+ value = readl(port->base + RP_VEND_XP);
+
+ if (value & RP_VEND_XP_DL_UP)
+ up = true;
+
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+
+ if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+ active = true;
+
+ seq_printf(s, "%2u ", port->index);
+
+ if (up)
+ seq_puts(s, "up");
+
+ if (active) {
+ if (up)
+ seq_puts(s, ", ");
+
+ seq_puts(s, "active");
+ }
+
+ seq_puts(s, "\n");
+ return 0;
+}
+
+static const struct seq_operations tegra_pcie_ports_sops = {
+ .start = tegra_pcie_ports_seq_start,
+ .next = tegra_pcie_ports_seq_next,
+ .stop = tegra_pcie_ports_seq_stop,
+ .show = tegra_pcie_ports_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
+
+static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
+{
+ debugfs_remove_recursive(pcie->debugfs);
+ pcie->debugfs = NULL;
+}
+
+static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
+{
+ pcie->debugfs = debugfs_create_dir("pcie", NULL);
+
+ debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie,
+ &tegra_pcie_ports_fops);
+}
+
+static int tegra_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *host;
+ struct tegra_pcie *pcie;
+ int err;
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!host)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(host);
+ host->sysdata = pcie;
+ platform_set_drvdata(pdev, pcie);
+
+ pcie->soc = of_device_get_match_data(dev);
+ INIT_LIST_HEAD(&pcie->ports);
+ pcie->dev = dev;
+
+ err = tegra_pcie_parse_dt(pcie);
+ if (err < 0)
+ return err;
+
+ err = tegra_pcie_get_resources(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to request resources: %d\n", err);
+ return err;
+ }
+
+ err = tegra_pcie_msi_setup(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to enable MSI support: %d\n", err);
+ goto put_resources;
+ }
+
+ pm_runtime_enable(pcie->dev);
+ err = pm_runtime_get_sync(pcie->dev);
+ if (err < 0) {
+ dev_err(dev, "fail to enable pcie controller: %d\n", err);
+ goto pm_runtime_put;
+ }
+
+ host->ops = &tegra_pcie_ops;
+ host->map_irq = tegra_pcie_map_irq;
+
+ err = pci_host_probe(host);
+ if (err < 0) {
+ dev_err(dev, "failed to register host: %d\n", err);
+ goto pm_runtime_put;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ tegra_pcie_debugfs_init(pcie);
+
+ return 0;
+
+pm_runtime_put:
+ pm_runtime_put_sync(pcie->dev);
+ pm_runtime_disable(pcie->dev);
+ tegra_pcie_msi_teardown(pcie);
+put_resources:
+ tegra_pcie_put_resources(pcie);
+ return err;
+}
+
+static void tegra_pcie_remove(struct platform_device *pdev)
+{
+ struct tegra_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct tegra_pcie_port *port, *tmp;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ tegra_pcie_debugfs_exit(pcie);
+
+ pci_stop_root_bus(host->bus);
+ pci_remove_root_bus(host->bus);
+ pm_runtime_put_sync(pcie->dev);
+ pm_runtime_disable(pcie->dev);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_pcie_msi_teardown(pcie);
+
+ tegra_pcie_put_resources(pcie);
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ tegra_pcie_port_free(port);
+}
+
+static int tegra_pcie_pm_suspend(struct device *dev)
+{
+ struct tegra_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_port *port;
+ int err;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ tegra_pcie_pme_turnoff(port);
+
+ tegra_pcie_disable_ports(pcie);
+
+ /*
+ * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
+ * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
+ */
+ tegra_pcie_disable_interrupts(pcie);
+
+ if (pcie->soc->program_uphy) {
+ err = tegra_pcie_phy_power_off(pcie);
+ if (err < 0)
+ dev_err(dev, "failed to power off PHY(s): %d\n", err);
+ }
+
+ reset_control_assert(pcie->pex_rst);
+ clk_disable_unprepare(pcie->pex_clk);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_pcie_disable_msi(pcie);
+
+ pinctrl_pm_select_idle_state(dev);
+ tegra_pcie_power_off(pcie);
+
+ return 0;
+}
+
+static int tegra_pcie_pm_resume(struct device *dev)
+{
+ struct tegra_pcie *pcie = dev_get_drvdata(dev);
+ int err;
+
+ err = tegra_pcie_power_on(pcie);
+ if (err) {
+ dev_err(dev, "tegra pcie power on fail: %d\n", err);
+ return err;
+ }
+
+ err = pinctrl_pm_select_default_state(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
+ goto poweroff;
+ }
+
+ tegra_pcie_enable_controller(pcie);
+ tegra_pcie_setup_translations(pcie);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_pcie_enable_msi(pcie);
+
+ err = clk_prepare_enable(pcie->pex_clk);
+ if (err) {
+ dev_err(dev, "failed to enable PEX clock: %d\n", err);
+ goto pex_dpd_enable;
+ }
+
+ reset_control_deassert(pcie->pex_rst);
+
+ if (pcie->soc->program_uphy) {
+ err = tegra_pcie_phy_power_on(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to power on PHY(s): %d\n", err);
+ goto disable_pex_clk;
+ }
+ }
+
+ tegra_pcie_apply_pad_settings(pcie);
+ tegra_pcie_enable_ports(pcie);
+
+ return 0;
+
+disable_pex_clk:
+ reset_control_assert(pcie->pex_rst);
+ clk_disable_unprepare(pcie->pex_clk);
+pex_dpd_enable:
+ pinctrl_pm_select_idle_state(dev);
+poweroff:
+ tegra_pcie_power_off(pcie);
+
+ return err;
+}
+
+static const struct dev_pm_ops tegra_pcie_pm_ops = {
+ RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)
+};
+
+static struct platform_driver tegra_pcie_driver = {
+ .driver = {
+ .name = "tegra-pcie",
+ .of_match_table = tegra_pcie_of_match,
+ .suppress_bind_attrs = true,
+ .pm = &tegra_pcie_pm_ops,
+ },
+ .probe = tegra_pcie_probe,
+ .remove_new = tegra_pcie_remove,
+};
+module_platform_driver(tegra_pcie_driver);
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
new file mode 100644
index 000000000..b5bd10a62
--- /dev/null
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015, 2016 Cavium, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/of_pci.h>
+#include <linux/of.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
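+/*
+ * set_val() extracts the byte or word addressed by a sub-dword config
+ * access from a 32-bit register value. For example, with v = 0xaabbccdd,
+ * where = 2 and size = 1, the shift is 16 and *val becomes 0xbb.
+ */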
+static void set_val(u32 v, int where, int size, u32 *val)
+{
+ int shift = (where & 3) * 8;
+
+ pr_debug("set_val %04x: %08x\n", (unsigned int)(where & ~3), v);
+ v >>= shift;
+ if (size == 1)
+ v &= 0xff;
+ else if (size == 2)
+ v &= 0xffff;
+ *val = v;
+}
+
+static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val)
+{
+ void __iomem *addr;
+ u32 v;
+
+ /* Entries are 16-byte aligned; bits[2,3] select word in entry */
+ int where_a = where & 0xc;
+
+ if (where_a == 0) {
+ set_val(e0, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0x4) {
+ addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ v = readl(addr);
+ v &= ~0xf;
+ v |= 2; /* EA entry-1. Base-L */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0x8) {
+ u32 barl_orig;
+ u32 barl_rb;
+
+ addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ barl_orig = readl(addr + 0);
+ writel(0xffffffff, addr + 0);
+ barl_rb = readl(addr + 0);
+ writel(barl_orig, addr + 0);
+ /* zeros in unsettable bits */
+ v = ~barl_rb & ~3;
+ v |= 0xc; /* EA entry-2. Offset-L */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xc) {
+ addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ v = readl(addr); /* EA entry-3. Base-H */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int where_a = where & ~3;
+ void __iomem *addr;
+ u32 node_bits;
+ u32 v;
+
+ /* EA Base[63:32] may be missing some bits ... */
+ switch (where_a) {
+ case 0xa8:
+ case 0xbc:
+ case 0xd0:
+ case 0xe4:
+ break;
+ default:
+ return pci_generic_config_read(bus, devfn, where, size, val);
+ }
+
+ addr = bus->ops->map_bus(bus, devfn, where_a);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ v = readl(addr);
+
+ /*
+ * Bit 44 of the 64-bit Base must match the same bit in
+ * the config space access window. Since we are working with
+ * the high-order 32 bits, shift everything down by 32 bits.
+ */
+ node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
+
+ v |= node_bits;
+ set_val(v, where, size, val);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 v;
+ u32 vendor_device;
+ u32 class_rev;
+ void __iomem *addr;
+ int cfg_type;
+ int where_a = where & ~3;
+
+ addr = bus->ops->map_bus(bus, devfn, 0xc);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ v = readl(addr);
+
+ /* Check for non type-00 header */
+ cfg_type = (v >> 16) & 0x7f;
+
+ addr = bus->ops->map_bus(bus, devfn, 8);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ class_rev = readl(addr);
+ if (class_rev == 0xffffffff)
+ goto no_emulation;
+
+ if ((class_rev & 0xff) >= 8) {
+ /* Pass-2 handling */
+ if (cfg_type)
+ goto no_emulation;
+ return thunder_ecam_p2_config_read(bus, devfn, where,
+ size, val);
+ }
+
+ /*
+ * All BARs have fixed addresses specified by the EA
+ * capability; they must return zero on read.
+ */
+ if (cfg_type == 0 &&
+ ((where >= 0x10 && where < 0x2c) ||
+ (where >= 0x1a4 && where < 0x1bc))) {
+ /* BAR or SR-IOV BAR */
+ *val = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ addr = bus->ops->map_bus(bus, devfn, 0);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ vendor_device = readl(addr);
+ if (vendor_device == 0xffffffff)
+ goto no_emulation;
+
+ pr_debug("%04x:%04x - Fix pass#: %08x, where: %03x, devfn: %03x\n",
+ vendor_device & 0xffff, vendor_device >> 16, class_rev,
+ (unsigned int)where, devfn);
+
+ /* Type-00 (endpoint) header */
+ if (cfg_type == 0) {
+ bool has_msix;
+ bool is_nic = (vendor_device == 0xa01e177d);
+ bool is_tns = (vendor_device == 0xa01f177d);
+
+ addr = bus->ops->map_bus(bus, devfn, 0x70);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* E_CAP */
+ v = readl(addr);
+ has_msix = (v & 0xff00) != 0;
+
+ if (!has_msix && where_a == 0x70) {
+ v |= 0xbc00; /* next capability is EA at 0xbc */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xb0) {
+ addr = bus->ops->map_bus(bus, devfn, where_a);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ v = readl(addr);
+ if (v & 0xff00)
+ pr_err("Bad MSIX cap header: %08x\n", v);
+ v |= 0xbc00; /* next capability is EA at 0xbc */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xbc) {
+ if (is_nic)
+ v = 0x40014; /* EA last in chain, 4 entries */
+ else if (is_tns)
+ v = 0x30014; /* EA last in chain, 3 entries */
+ else if (has_msix)
+ v = 0x20014; /* EA last in chain, 2 entries */
+ else
+ v = 0x10014; /* EA last in chain, 1 entry */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a >= 0xc0 && where_a < 0xd0)
+ /* EA entry-0. PP=0, BAR0 Size:3 */
+ return handle_ea_bar(0x80ff0003,
+ 0x10, bus, devfn, where,
+ size, val);
+ if (where_a >= 0xd0 && where_a < 0xe0 && has_msix)
+ /* EA entry-1. PP=0, BAR4 Size:3 */
+ return handle_ea_bar(0x80ff0043,
+ 0x20, bus, devfn, where,
+ size, val);
+ if (where_a >= 0xe0 && where_a < 0xf0 && is_tns)
+ /* EA entry-2. PP=0, BAR2, Size:3 */
+ return handle_ea_bar(0x80ff0023,
+ 0x18, bus, devfn, where,
+ size, val);
+ if (where_a >= 0xe0 && where_a < 0xf0 && is_nic)
+ /* EA entry-2. PP=4, VF_BAR0 (9), Size:3 */
+ return handle_ea_bar(0x80ff0493,
+ 0x1a4, bus, devfn, where,
+ size, val);
+ if (where_a >= 0xf0 && where_a < 0x100 && is_nic)
+ /* EA entry-3. PP=4, VF_BAR4 (d), Size:3 */
+ return handle_ea_bar(0x80ff04d3,
+ 0x1b4, bus, devfn, where,
+ size, val);
+ } else if (cfg_type == 1) {
+ bool is_rsl_bridge = devfn == 0x08;
+ bool is_rad_bridge = devfn == 0xa0;
+ bool is_zip_bridge = devfn == 0xa8;
+ bool is_dfa_bridge = devfn == 0xb0;
+ bool is_nic_bridge = devfn == 0x10;
+
+ if (where_a == 0x70) {
+ addr = bus->ops->map_bus(bus, devfn, where_a);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ v = readl(addr);
+ if (v & 0xff00)
+ pr_err("Bad PCIe cap header: %08x\n", v);
+ v |= 0xbc00; /* next capability is EA at 0xbc */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xbc) {
+ if (is_nic_bridge)
+ v = 0x10014; /* EA last in chain, 1 entry */
+ else
+ v = 0x00014; /* EA last in chain, no entries */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xc0) {
+ if (is_rsl_bridge || is_nic_bridge)
+ v = 0x0101; /* subordinate:secondary = 1:1 */
+ else if (is_rad_bridge)
+ v = 0x0202; /* subordinate:secondary = 2:2 */
+ else if (is_zip_bridge)
+ v = 0x0303; /* subordinate:secondary = 3:3 */
+ else if (is_dfa_bridge)
+ v = 0x0404; /* subordinate:secondary = 4:4 */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xc4 && is_nic_bridge) {
+ /* Enabled, not-Write, SP=ff, PP=05, BEI=6, ES=4 */
+ v = 0x80ff0564;
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xc8 && is_nic_bridge) {
+ v = 0x00000002; /* Base-L 64-bit */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xcc && is_nic_bridge) {
+ v = 0xfffffffe; /* MaxOffset-L 64-bit */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xd0 && is_nic_bridge) {
+ v = 0x00008430; /* NIC Base-H */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xd4 && is_nic_bridge) {
+ v = 0x0000000f; /* MaxOffset-H */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ }
+no_emulation:
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ /*
+ * All BARs have fixed addresses; ignore BAR writes so they
+ * don't get corrupted.
+ */
+ if ((where >= 0x10 && where < 0x2c) ||
+ (where >= 0x1a4 && where < 0x1bc))
+ /* BAR or SR-IOV BAR */
+ return PCIBIOS_SUCCESSFUL;
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+const struct pci_ecam_ops pci_thunder_ecam_ops = {
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = thunder_ecam_config_read,
+ .write = thunder_ecam_config_write,
+ }
+};
+
+#ifdef CONFIG_PCI_HOST_THUNDER_ECAM
+
+static const struct of_device_id thunder_ecam_of_match[] = {
+ {
+ .compatible = "cavium,pci-host-thunder-ecam",
+ .data = &pci_thunder_ecam_ops,
+ },
+ { },
+};
+
+static struct platform_driver thunder_ecam_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = thunder_ecam_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pci_host_common_probe,
+};
+builtin_platform_driver(thunder_ecam_driver);
+
+#endif
+#endif
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
new file mode 100644
index 000000000..06a9855cb
--- /dev/null
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015 - 2016 Cavium, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include "../pci.h"
+
+#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
+#define PEM_CFG_WR 0x28
+#define PEM_CFG_RD 0x30
+
+/*
+ * Enhanced Configuration Access Mechanism (ECAM)
+ *
+ * N.B. This is a non-standard platform-specific ECAM bus shift value. For
+ * standard values defined in the PCI Express Base Specification see
+ * include/linux/pci-ecam.h.
+ */
+#define THUNDER_PCIE_ECAM_BUS_SHIFT 24
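+
+/*
+ * Illustrative arithmetic, assuming the generic ECAM mapping in
+ * drivers/pci/ecam.c (devfn placed at bus_shift - 8): with a bus shift
+ * of 24, the config dword for bus 2, devfn 0x10, register 0x04 lives at
+ * (2 << 24) | (0x10 << 16) | 0x04 = 0x02100004 from the window base,
+ * rather than (2 << 20) | (0x10 << 12) | 0x04 with the standard shift
+ * of 20.
+ */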
+
+struct thunder_pem_pci {
+ u32 ea_entry[3];
+ void __iomem *pem_reg_base;
+};
+
+static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u64 read_val, tmp_val;
+ struct pci_config_window *cfg = bus->sysdata;
+ struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv;
+
+ if (devfn != 0 || where >= 2048)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * 32-bit accesses only. Write the address to the low order
+ * bits of PEM_CFG_RD, then trigger the read by reading back.
+ * The config data lands in the upper 32 bits of PEM_CFG_RD.
+ */
+ read_val = where & ~3ull;
+ writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+
+ /*
+ * The config space contains some garbage; fix it up. Also
+ * synthesize an EA capability for the BAR used by MSI-X.
+ */
+ switch (where & ~3) {
+ case 0x40:
+ read_val &= 0xffff00ff;
+ read_val |= 0x00007000; /* Skip MSI CAP */
+ break;
+ case 0x70: /* Express Cap */
+ /*
+ * Change PME interrupt to vector 2 on T88 where it
+ * reads as 0, else leave it alone.
+ */
+ if (!(read_val & (0x1f << 25)))
+ read_val |= (2u << 25);
+ break;
+ case 0xb0: /* MSI-X Cap */
+ /* TableSize=2 or 4, Next Cap is EA */
+ read_val &= 0xc00000ff;
+ /*
+ * If the Express Cap (0x70) raw PME vector reads as 0, we are
+ * on T88 and TableSize is reported as 4; otherwise TableSize
+ * is 2.
+ */
+ writeq(0x70, pem_pci->pem_reg_base + PEM_CFG_RD);
+ tmp_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ tmp_val >>= 32;
+ if (!(tmp_val & (0x1f << 25)))
+ read_val |= 0x0003bc00;
+ else
+ read_val |= 0x0001bc00;
+ break;
+ case 0xb4:
+ /* Table offset=0, BIR=0 */
+ read_val = 0x00000000;
+ break;
+ case 0xb8:
+ /* BPA offset=0xf0000, BIR=0 */
+ read_val = 0x000f0000;
+ break;
+ case 0xbc:
+ /* EA, 1 entry, no next Cap */
+ read_val = 0x00010014;
+ break;
+ case 0xc0:
+ /* DW2 for type-1 */
+ read_val = 0x00000000;
+ break;
+ case 0xc4:
+ /* Entry BEI=0, PP=0x00, SP=0xff, ES=3 */
+ read_val = 0x80ff0003;
+ break;
+ case 0xc8:
+ read_val = pem_pci->ea_entry[0];
+ break;
+ case 0xcc:
+ read_val = pem_pci->ea_entry[1];
+ break;
+ case 0xd0:
+ read_val = pem_pci->ea_entry[2];
+ break;
+ default:
+ break;
+ }
+ read_val >>= (8 * (where & 3));
+ switch (size) {
+ case 1:
+ read_val &= 0xff;
+ break;
+ case 2:
+ read_val &= 0xffff;
+ break;
+ default:
+ break;
+ }
+ *val = read_val;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ if (bus->number < cfg->busr.start ||
+ bus->number > cfg->busr.end)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * The first device on the bus is the PEM PCIe bridge.
+ * Special case its config access.
+ */
+ if (bus->number == cfg->busr.start)
+ return thunder_pem_bridge_read(bus, devfn, where, size, val);
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+/*
+ * Some of the w1c_bits below also include read-only or non-writable
+ * reserved bits; this makes the code simpler and is OK because
+ * those bits are not affected by writing zeros to them.
+ */
+static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned)
+{
+ u32 w1c_bits = 0;
+
+ switch (where_aligned) {
+ case 0x04: /* Command/Status */
+ case 0x1c: /* Base and I/O Limit/Secondary Status */
+ w1c_bits = 0xff000000;
+ break;
+ case 0x44: /* Power Management Control and Status */
+ w1c_bits = 0xfffffe00;
+ break;
+ case 0x78: /* Device Control/Device Status */
+ case 0x80: /* Link Control/Link Status */
+ case 0x88: /* Slot Control/Slot Status */
+ case 0x90: /* Root Status */
+ case 0xa0: /* Link Control 2 Registers/Link Status 2 */
+ w1c_bits = 0xffff0000;
+ break;
+ case 0x104: /* Uncorrectable Error Status */
+ case 0x110: /* Correctable Error Status */
+ case 0x130: /* Error Status */
+ case 0x160: /* Link Control 4 */
+ w1c_bits = 0xffffffff;
+ break;
+ default:
+ break;
+ }
+ return w1c_bits;
+}
+
+/* Some bits must be written to one so they appear to be read-only. */
+static u32 thunder_pem_bridge_w1_bits(u64 where_aligned)
+{
+ u32 w1_bits;
+
+ switch (where_aligned) {
+ case 0x1c: /* I/O Base / I/O Limit, Secondary Status */
+ /* Force 32-bit I/O addressing. */
+ w1_bits = 0x0101;
+ break;
+ case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */
+ /* Force 64-bit addressing */
+ w1_bits = 0x00010001;
+ break;
+ default:
+ w1_bits = 0;
+ break;
+ }
+ return w1_bits;
+}
+
+static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv;
+ u64 write_val, read_val;
+ u64 where_aligned = where & ~3ull;
+ u32 mask = 0;
+
+ if (devfn != 0 || where >= 2048)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * 32-bit accesses only. If the write is for a size smaller
+ * than 32 bits, we must first read the 32-bit value and merge
+ * in the desired bits and then write the whole 32 bits back
+ * out.
+ */
+ switch (size) {
+ case 1:
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+ mask = ~(0xff << (8 * (where & 3)));
+ read_val &= mask;
+ val = (val & 0xff) << (8 * (where & 3));
+ val |= (u32)read_val;
+ break;
+ case 2:
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+ mask = ~(0xffff << (8 * (where & 3)));
+ read_val &= mask;
+ val = (val & 0xffff) << (8 * (where & 3));
+ val |= (u32)read_val;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * By expanding the write width to 32 bits, we may
+ * inadvertently hit some W1C bits that were not intended to
+ * be written. Calculate the mask that must be applied to the
+ * data to be written to avoid these cases.
+ */
+ if (mask) {
+ u32 w1c_bits = thunder_pem_bridge_w1c_bits(where);
+
+ if (w1c_bits) {
+ mask &= w1c_bits;
+ val &= ~mask;
+ }
+ }
+
+ /*
+ * Some bits must be read-only with a value of one. Since the
+ * access method allows these to be cleared if a zero is
+ * written, force them to one before writing.
+ */
+ val |= thunder_pem_bridge_w1_bits(where_aligned);
+
+ /*
+ * Low order bits are the config address, the high order 32
+ * bits are the data to be written.
+ */
+ write_val = (((u64)val) << 32) | where_aligned;
+ writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);
+ return PCIBIOS_SUCCESSFUL;
+}
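+
+/*
+ * Worked example of the read-modify-write above: a 16-bit write to the
+ * Command register at offset 0x04 reads back the full dword, keeps the
+ * Status halfword (mask = 0xffff0000) and merges in the new Command
+ * value. Offset 0x04 carries W1C Status bits in its top byte (w1c_bits =
+ * 0xff000000), so clearing mask & w1c_bits from the merged value stops
+ * the widened 32-bit write from acting as a write-one-to-clear on the
+ * read-back status bits.
+ */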
+
+static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ if (bus->number < cfg->busr.start ||
+ bus->number > cfg->busr.end)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ /*
+ * The first device on the bus is the PEM PCIe bridge.
+ * Special case its config access.
+ */
+ if (bus->number == cfg->busr.start)
+ return thunder_pem_bridge_write(bus, devfn, where, size, val);
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
+ struct resource *res_pem)
+{
+ struct thunder_pem_pci *pem_pci;
+ resource_size_t bar4_start;
+
+ pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
+ if (!pem_pci)
+ return -ENOMEM;
+
+ pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
+ if (!pem_pci->pem_reg_base)
+ return -ENOMEM;
+
+ /*
+ * The MSI-X BAR for the PEM and AER interrupts is located at
+ * a fixed offset from the PEM register base. Generate a
+ * fragment of the synthesized Enhanced Allocation capability
+ * structure here for the BAR.
+ */
+ bar4_start = res_pem->start + 0xf00000;
+ pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
+ pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
+ pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
+
+ cfg->priv = pem_pci;
+ return 0;
+}
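+
+/*
+ * For instance, with a 16M res_pem starting at 0x87e0c1000000, bar4_start
+ * is 0x87e0c1f00000: ea_entry[0] holds its low 32 bits with bit 1 set
+ * (64-bit Base-L), ea_entry[1] the low 32 bits of the remaining offset
+ * with the bottom two bits cleared (MaxOffset-L), and ea_entry[2] the
+ * high 32 bits of the base. These are the words thunder_pem_bridge_read()
+ * returns at config offsets 0xc8, 0xcc and 0xd0.
+ */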
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+#define PEM_RES_BASE 0x87e0c0000000ULL
+#define PEM_NODE_MASK GENMASK_ULL(45, 44)
+#define PEM_INDX_MASK GENMASK_ULL(26, 24)
+#define PEM_MIN_DOM_IN_NODE 4
+#define PEM_MAX_DOM_IN_NODE 10
+
+static void thunder_pem_reserve_range(struct device *dev, int seg,
+ struct resource *r)
+{
+ resource_size_t start = r->start, end = r->end;
+ struct resource *res;
+ const char *regionid;
+
+ regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg);
+ if (!regionid)
+ return;
+
+ res = request_mem_region(start, end - start + 1, regionid);
+ if (res)
+ res->flags &= ~IORESOURCE_BUSY;
+ else
+ kfree(regionid);
+
+ dev_info(dev, "%pR %s reserved\n", r,
+ res ? "has been" : "could not be");
+}
+
+static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
+ struct resource *res_pem)
+{
+ int node = acpi_get_node(root->device->handle);
+ int index;
+
+ if (node == NUMA_NO_NODE)
+ node = 0;
+
+ index = root->segment - PEM_MIN_DOM_IN_NODE;
+ index -= node * PEM_MAX_DOM_IN_NODE;
+ res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
+ FIELD_PREP(PEM_INDX_MASK, index);
+ res_pem->flags = IORESOURCE_MEM;
+}
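+
+/*
+ * For example, on node 0 with segment 5, index = 5 - 4 = 1, so
+ * res_pem->start = 0x87e0c0000000 | (1 << 24) = 0x87e0c1000000; on
+ * node 1, PEM_NODE_MASK would additionally set bit 44.
+ */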
+
+static int thunder_pem_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct resource *res_pem;
+ int ret;
+
+ res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL);
+ if (!res_pem)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem);
+
+ /*
+ * If we fail to gather resources, it means we are running with old
+ * FW and need to calculate the PEM-specific resources manually.
+ */
+ if (ret) {
+ thunder_pem_legacy_fw(root, res_pem);
+ /*
+ * Reserve the 64K PEM-specific resource region. The full 16M range
+ * size is required for the thunder_pem_init() call.
+ */
+ res_pem->end = res_pem->start + SZ_64K - 1;
+ thunder_pem_reserve_range(dev, root->segment, res_pem);
+ res_pem->end = res_pem->start + SZ_16M - 1;
+
+ /* Reserve PCI configuration space as well. */
+ thunder_pem_reserve_range(dev, root->segment, &cfg->res);
+ }
+
+ return thunder_pem_init(dev, cfg, res_pem);
+}
+
+const struct pci_ecam_ops thunder_pem_ecam_ops = {
+ .bus_shift = THUNDER_PCIE_ECAM_BUS_SHIFT,
+ .init = thunder_pem_acpi_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = thunder_pem_config_read,
+ .write = thunder_pem_config_write,
+ }
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HOST_THUNDER_PEM
+
+static int thunder_pem_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res_pem;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ /*
+ * The second register range is the PEM bridge to the PCIe
+ * bus. It has a different config access method than those
+ * devices behind the bridge.
+ */
+ res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_pem) {
+ dev_err(dev, "missing \"reg[1]\"property\n");
+ return -EINVAL;
+ }
+
+ return thunder_pem_init(dev, cfg, res_pem);
+}
+
+static const struct pci_ecam_ops pci_thunder_pem_ops = {
+ .bus_shift = THUNDER_PCIE_ECAM_BUS_SHIFT,
+ .init = thunder_pem_platform_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = thunder_pem_config_read,
+ .write = thunder_pem_config_write,
+ }
+};
+
+static const struct of_device_id thunder_pem_of_match[] = {
+ {
+ .compatible = "cavium,pci-host-thunder-pem",
+ .data = &pci_thunder_pem_ops,
+ },
+ { },
+};
+
+static struct platform_driver thunder_pem_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = thunder_pem_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pci_host_common_probe,
+};
+builtin_platform_driver(thunder_pem_driver);
+
+#endif
+#endif
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
new file mode 100644
index 000000000..460a82532
--- /dev/null
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -0,0 +1,907 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for V3 Semiconductor PCI Local Bus to PCI Bridge
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the code from arch/arm/mach-integrator/pci_v3.c
+ * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2000-2001 Deep Blue Solutions Ltd
+ *
+ * Contributors to the old driver include:
+ * Russell King <linux@armlinux.org.uk>
+ * David A. Rusling <david.rusling@linaro.org> (uHAL, ARM Firmware suite)
+ * Rob Herring <robh@kernel.org>
+ * Liviu Dudau <Liviu.Dudau@arm.com>
+ * Grant Likely <grant.likely@secretlab.ca>
+ * Arnd Bergmann <arnd@arndb.de>
+ * Bjorn Helgaas <bhelgaas@google.com>
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+
+#include "../pci.h"
+
+#define V3_PCI_VENDOR 0x00000000
+#define V3_PCI_DEVICE 0x00000002
+#define V3_PCI_CMD 0x00000004
+#define V3_PCI_STAT 0x00000006
+#define V3_PCI_CC_REV 0x00000008
+#define V3_PCI_HDR_CFG 0x0000000C
+#define V3_PCI_IO_BASE 0x00000010
+#define V3_PCI_BASE0 0x00000014
+#define V3_PCI_BASE1 0x00000018
+#define V3_PCI_SUB_VENDOR 0x0000002C
+#define V3_PCI_SUB_ID 0x0000002E
+#define V3_PCI_ROM 0x00000030
+#define V3_PCI_BPARAM 0x0000003C
+#define V3_PCI_MAP0 0x00000040
+#define V3_PCI_MAP1 0x00000044
+#define V3_PCI_INT_STAT 0x00000048
+#define V3_PCI_INT_CFG 0x0000004C
+#define V3_LB_BASE0 0x00000054
+#define V3_LB_BASE1 0x00000058
+#define V3_LB_MAP0 0x0000005E
+#define V3_LB_MAP1 0x00000062
+#define V3_LB_BASE2 0x00000064
+#define V3_LB_MAP2 0x00000066
+#define V3_LB_SIZE 0x00000068
+#define V3_LB_IO_BASE 0x0000006E
+#define V3_FIFO_CFG 0x00000070
+#define V3_FIFO_PRIORITY 0x00000072
+#define V3_FIFO_STAT 0x00000074
+#define V3_LB_ISTAT 0x00000076
+#define V3_LB_IMASK 0x00000077
+#define V3_SYSTEM 0x00000078
+#define V3_LB_CFG 0x0000007A
+#define V3_PCI_CFG 0x0000007C
+#define V3_DMA_PCI_ADR0 0x00000080
+#define V3_DMA_PCI_ADR1 0x00000090
+#define V3_DMA_LOCAL_ADR0 0x00000084
+#define V3_DMA_LOCAL_ADR1 0x00000094
+#define V3_DMA_LENGTH0 0x00000088
+#define V3_DMA_LENGTH1 0x00000098
+#define V3_DMA_CSR0 0x0000008B
+#define V3_DMA_CSR1 0x0000009B
+#define V3_DMA_CTLB_ADR0 0x0000008C
+#define V3_DMA_CTLB_ADR1 0x0000009C
+#define V3_DMA_DELAY 0x000000E0
+#define V3_MAIL_DATA 0x000000C0
+#define V3_PCI_MAIL_IEWR 0x000000D0
+#define V3_PCI_MAIL_IERD 0x000000D2
+#define V3_LB_MAIL_IEWR 0x000000D4
+#define V3_LB_MAIL_IERD 0x000000D6
+#define V3_MAIL_WR_STAT 0x000000D8
+#define V3_MAIL_RD_STAT 0x000000DA
+#define V3_QBA_MAP 0x000000DC
+
+/* PCI STATUS bits */
+#define V3_PCI_STAT_PAR_ERR BIT(15)
+#define V3_PCI_STAT_SYS_ERR BIT(14)
+#define V3_PCI_STAT_M_ABORT_ERR BIT(13)
+#define V3_PCI_STAT_T_ABORT_ERR BIT(12)
+
+/* LB ISTAT bits */
+#define V3_LB_ISTAT_MAILBOX BIT(7)
+#define V3_LB_ISTAT_PCI_RD BIT(6)
+#define V3_LB_ISTAT_PCI_WR BIT(5)
+#define V3_LB_ISTAT_PCI_INT BIT(4)
+#define V3_LB_ISTAT_PCI_PERR BIT(3)
+#define V3_LB_ISTAT_I2O_QWR BIT(2)
+#define V3_LB_ISTAT_DMA1 BIT(1)
+#define V3_LB_ISTAT_DMA0 BIT(0)
+
+/* PCI COMMAND bits */
+#define V3_COMMAND_M_FBB_EN BIT(9)
+#define V3_COMMAND_M_SERR_EN BIT(8)
+#define V3_COMMAND_M_PAR_EN BIT(6)
+#define V3_COMMAND_M_MASTER_EN BIT(2)
+#define V3_COMMAND_M_MEM_EN BIT(1)
+#define V3_COMMAND_M_IO_EN BIT(0)
+
+/* SYSTEM bits */
+#define V3_SYSTEM_M_RST_OUT BIT(15)
+#define V3_SYSTEM_M_LOCK BIT(14)
+#define V3_SYSTEM_UNLOCK 0xa05f
+
+/* PCI CFG bits */
+#define V3_PCI_CFG_M_I2O_EN BIT(15)
+#define V3_PCI_CFG_M_IO_REG_DIS BIT(14)
+#define V3_PCI_CFG_M_IO_DIS BIT(13)
+#define V3_PCI_CFG_M_EN3V BIT(12)
+#define V3_PCI_CFG_M_RETRY_EN BIT(10)
+#define V3_PCI_CFG_M_AD_LOW1 BIT(9)
+#define V3_PCI_CFG_M_AD_LOW0 BIT(8)
+/*
+ * This is the value applied to C/BE[3:1], with bit 0 always held 0
+ * during DMA access.
+ */
+#define V3_PCI_CFG_M_RTYPE_SHIFT 5
+#define V3_PCI_CFG_M_WTYPE_SHIFT 1
+#define V3_PCI_CFG_TYPE_DEFAULT 0x3
+
+/* PCI BASE bits (PCI -> Local Bus) */
+#define V3_PCI_BASE_M_ADR_BASE 0xFFF00000U
+#define V3_PCI_BASE_M_ADR_BASEL 0x000FFF00U
+#define V3_PCI_BASE_M_PREFETCH BIT(3)
+#define V3_PCI_BASE_M_TYPE (3 << 1)
+#define V3_PCI_BASE_M_IO BIT(0)
+
+/* PCI MAP bits (PCI -> Local bus) */
+#define V3_PCI_MAP_M_MAP_ADR 0xFFF00000U
+#define V3_PCI_MAP_M_RD_POST_INH BIT(15)
+#define V3_PCI_MAP_M_ROM_SIZE (3 << 10)
+#define V3_PCI_MAP_M_SWAP (3 << 8)
+#define V3_PCI_MAP_M_ADR_SIZE 0x000000F0U
+#define V3_PCI_MAP_M_REG_EN BIT(1)
+#define V3_PCI_MAP_M_ENABLE BIT(0)
+
+/* LB_BASE0,1 bits (Local bus -> PCI) */
+#define V3_LB_BASE_ADR_BASE 0xfff00000U
+#define V3_LB_BASE_SWAP (3 << 8)
+#define V3_LB_BASE_ADR_SIZE (15 << 4)
+#define V3_LB_BASE_PREFETCH BIT(3)
+#define V3_LB_BASE_ENABLE BIT(0)
+
+#define V3_LB_BASE_ADR_SIZE_1MB (0 << 4)
+#define V3_LB_BASE_ADR_SIZE_2MB (1 << 4)
+#define V3_LB_BASE_ADR_SIZE_4MB (2 << 4)
+#define V3_LB_BASE_ADR_SIZE_8MB (3 << 4)
+#define V3_LB_BASE_ADR_SIZE_16MB (4 << 4)
+#define V3_LB_BASE_ADR_SIZE_32MB (5 << 4)
+#define V3_LB_BASE_ADR_SIZE_64MB (6 << 4)
+#define V3_LB_BASE_ADR_SIZE_128MB (7 << 4)
+#define V3_LB_BASE_ADR_SIZE_256MB (8 << 4)
+#define V3_LB_BASE_ADR_SIZE_512MB (9 << 4)
+#define V3_LB_BASE_ADR_SIZE_1GB (10 << 4)
+#define V3_LB_BASE_ADR_SIZE_2GB (11 << 4)
+
+#define v3_addr_to_lb_base(a) ((a) & V3_LB_BASE_ADR_BASE)
+
+/* LB_MAP0,1 bits (Local bus -> PCI) */
+#define V3_LB_MAP_MAP_ADR 0xfff0U
+#define V3_LB_MAP_TYPE (7 << 1)
+#define V3_LB_MAP_AD_LOW_EN BIT(0)
+
+#define V3_LB_MAP_TYPE_IACK (0 << 1)
+#define V3_LB_MAP_TYPE_IO (1 << 1)
+#define V3_LB_MAP_TYPE_MEM (3 << 1)
+#define V3_LB_MAP_TYPE_CONFIG (5 << 1)
+#define V3_LB_MAP_TYPE_MEM_MULTIPLE (6 << 1)
+
+#define v3_addr_to_lb_map(a) (((a) >> 16) & V3_LB_MAP_MAP_ADR)
+
+/* LB_BASE2 bits (Local bus -> PCI IO) */
+#define V3_LB_BASE2_ADR_BASE 0xff00U
+#define V3_LB_BASE2_SWAP_AUTO (3 << 6)
+#define V3_LB_BASE2_ENABLE BIT(0)
+
+#define v3_addr_to_lb_base2(a) (((a) >> 16) & V3_LB_BASE2_ADR_BASE)
+
+/* LB_MAP2 bits (Local bus -> PCI IO) */
+#define V3_LB_MAP2_MAP_ADR 0xff00U
+
+#define v3_addr_to_lb_map2(a) (((a) >> 16) & V3_LB_MAP2_MAP_ADR)
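+
+/*
+ * As a concrete example of the encodings above, an I/O window at local
+ * bus address 0x60000000 gives v3_addr_to_lb_base2(0x60000000) = 0x6000
+ * for LB_BASE2 and v3_addr_to_lb_map2(0x60000000) = 0x6000 for LB_MAP2;
+ * only address bits 31..24 are programmable for these 16MB I/O windows.
+ */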
+
+/* FIFO priority bits */
+#define V3_FIFO_PRIO_LOCAL BIT(12)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_EOB BIT(10)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 BIT(11)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_ANY (BIT(10)|BIT(11))
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_EOB BIT(8)
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 BIT(9)
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_ANY (BIT(8)|BIT(9))
+#define V3_FIFO_PRIO_PCI BIT(4)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_EOB BIT(2)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 BIT(3)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_ANY (BIT(2)|BIT(3))
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_EOB BIT(0)
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1 BIT(1)
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_ANY (BIT(0)|BIT(1))
+
+/* Local bus configuration bits */
+#define V3_LB_CFG_LB_TO_64_CYCLES 0x0000
+#define V3_LB_CFG_LB_TO_256_CYCLES BIT(13)
+#define V3_LB_CFG_LB_TO_512_CYCLES BIT(14)
+#define V3_LB_CFG_LB_TO_1024_CYCLES (BIT(13)|BIT(14))
+#define V3_LB_CFG_LB_RST BIT(12)
+#define V3_LB_CFG_LB_PPC_RDY BIT(11)
+#define V3_LB_CFG_LB_LB_INT BIT(10)
+#define V3_LB_CFG_LB_ERR_EN BIT(9)
+#define V3_LB_CFG_LB_RDY_EN BIT(8)
+#define V3_LB_CFG_LB_BE_IMODE BIT(7)
+#define V3_LB_CFG_LB_BE_OMODE BIT(6)
+#define V3_LB_CFG_LB_ENDIAN BIT(5)
+#define V3_LB_CFG_LB_PARK_EN BIT(4)
+#define V3_LB_CFG_LB_FBB_DIS BIT(2)
+
+/* ARM Integrator-specific extended control registers */
+#define INTEGRATOR_SC_PCI_OFFSET 0x18
+#define INTEGRATOR_SC_PCI_ENABLE BIT(0)
+#define INTEGRATOR_SC_PCI_INTCLR BIT(1)
+#define INTEGRATOR_SC_LBFADDR_OFFSET 0x20
+#define INTEGRATOR_SC_LBFCODE_OFFSET 0x24
+
+struct v3_pci {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *config_base;
+ u32 config_mem;
+ u32 non_pre_mem;
+ u32 pre_mem;
+ phys_addr_t non_pre_bus_addr;
+ phys_addr_t pre_bus_addr;
+ struct regmap *map;
+};
+
+/*
+ * The V3 PCI interface chip in Integrator provides several windows from
+ * local bus memory into the PCI memory areas. Unfortunately, there
+ * are not really enough windows for our usage, so we reuse
+ * one of the windows for access to PCI configuration space. On the
+ * Integrator/AP, the memory map is as follows:
+ *
+ * Local Bus Memory Usage
+ *
+ * 40000000 - 4FFFFFFF PCI memory. 256M non-prefetchable
+ * 50000000 - 5FFFFFFF PCI memory. 256M prefetchable
+ * 60000000 - 60FFFFFF PCI IO. 16M
+ * 61000000 - 61FFFFFF PCI Configuration. 16M
+ *
+ * There are three V3 windows, each described by a pair of V3 registers.
+ * These are LB_BASE0/LB_MAP0, LB_BASE1/LB_MAP1 and LB_BASE2/LB_MAP2.
+ * Base0 and Base1 can be used for any type of PCI memory access. Base2
+ * can be used either for PCI I/O or for I2O accesses. By default, uHAL
+ * uses this only for PCI IO space.
+ *
+ * Normally these spaces are mapped using the following base registers:
+ *
+ * Usage Local Bus Memory Base/Map registers used
+ *
+ * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0
+ * Mem 50000000 - 5FFFFFFF LB_BASE1/LB_MAP1
+ * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2
+ * Cfg 61000000 - 61FFFFFF
+ *
+ * This means that I2O and PCI configuration space accesses will fail.
+ * When PCI configuration accesses are needed (via the uHAL PCI
+ * configuration space primitives) we must remap the spaces as follows:
+ *
+ * Usage Local Bus Memory Base/Map registers used
+ *
+ * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0
+ * Mem 50000000 - 5FFFFFFF LB_BASE0/LB_MAP0
+ * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2
+ * Cfg 61000000 - 61FFFFFF LB_BASE1/LB_MAP1
+ *
+ * To make this work, the code depends on overlapping windows working.
+ * The V3 chip translates an address by checking its range within
+ * each of the BASE/MAP pairs in turn (in ascending register number
+ * order). It will use the first matching pair. So, for example,
+ * if the same address is mapped by both LB_BASE0/LB_MAP0 and
+ * LB_BASE1/LB_MAP1, the V3 will use the translation from
+ * LB_BASE0/LB_MAP0.
+ *
+ * To allow PCI Configuration space access, the code enlarges the
+ * window mapped by LB_BASE0/LB_MAP0 from 256M to 512M. This occludes
+ * the window currently mapped by LB_BASE1/LB_MAP1 so that it can
+ * be remapped for use by configuration cycles.
+ *
+ * At the end of the PCI Configuration space accesses,
+ * LB_BASE1/LB_MAP1 is reset to map PCI Memory. Finally the window
+ * mapped by LB_BASE0/LB_MAP0 is reduced in size from 512M to 256M to
+ * reveal the now restored LB_BASE1/LB_MAP1 window.
+ *
+ * NOTE: We do not set up I2O mapping. I suspect that this is only
+ * for an intelligent (target) device. Using I2O disables most of
+ * the mappings into PCI memory.
+ */
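+
+/*
+ * As a worked example of the type 0 encoding implemented below: for
+ * bus 0, slot 9, function 1, the one-hot IDSEL line is AD[20]
+ * (BIT(9 + 11)) and the function number sits in bits 10..8, so the
+ * configuration "address" becomes BIT(20) | (1 << 8) = 0x00100100.
+ */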
+static void __iomem *v3_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int offset)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ unsigned int address, mapaddress, busnr;
+
+ busnr = bus->number;
+ if (busnr == 0) {
+ int slot = PCI_SLOT(devfn);
+
+ /*
+ * local bus segment so need a type 0 config cycle
+ *
+ * build the PCI configuration "address" with one-hot in
+ * A31-A11
+ *
+ * mapaddress:
+ * 3:1 = config cycle (101)
+ * 0 = PCI A1 & A0 are 0 (0)
+ */
+ address = PCI_FUNC(devfn) << 8;
+ mapaddress = V3_LB_MAP_TYPE_CONFIG;
+
+ if (slot > 12)
+ /*
+ * high order bits are handled by the MAP register
+ */
+ mapaddress |= BIT(slot - 5);
+ else
+ /*
+ * low order bits handled directly in the address
+ */
+ address |= BIT(slot + 11);
+ } else {
+ /*
+ * not the local bus segment so need a type 1 config cycle
+ *
+ * address:
+ * 23:16 = bus number
+ * 15:11 = slot number (7:3 of devfn)
+ * 10:8 = func number (2:0 of devfn)
+ *
+ * mapaddress:
+ * 3:1 = config cycle (101)
+ * 0 = PCI A1 & A0 from host bus (1)
+ */
+ mapaddress = V3_LB_MAP_TYPE_CONFIG | V3_LB_MAP_AD_LOW_EN;
+ address = (busnr << 16) | (devfn << 8);
+ }
+
+ /*
+ * Set up base0 to see all 512MB of memory space (not
+ * prefetchable); this frees up base1 for re-use by
+ * configuration memory.
+ */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_512MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+
+ /*
+ * Set up base1/map1 to point into configuration space.
+ * The config mem is always 16MB.
+ */
+ writel(v3_addr_to_lb_base(v3->config_mem) |
+ V3_LB_BASE_ADR_SIZE_16MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(mapaddress, v3->base + V3_LB_MAP1);
+
+ return v3->config_base + address + offset;
+}
+
+static void v3_unmap_bus(struct v3_pci *v3)
+{
+ /*
+ * Reassign base1 for use by prefetchable PCI memory
+ */
+ writel(v3_addr_to_lb_base(v3->pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_PREFETCH |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(v3_addr_to_lb_map(v3->pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM, /* was V3_LB_MAP_TYPE_MEM_MULTIPLE */
+ v3->base + V3_LB_MAP1);
+
+ /*
+ * And shrink base0 back to a 256M window (NOTE: MAP0 already correct)
+ */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+}
+
+static int v3_pci_read_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 *value)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ int ret;
+
+ dev_dbg(&bus->dev,
+ "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
+ ret = pci_generic_config_read(bus, fn, config, size, value);
+ v3_unmap_bus(v3);
+ return ret;
+}
+
+static int v3_pci_write_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 value)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ int ret;
+
+ dev_dbg(&bus->dev,
+ "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+ ret = pci_generic_config_write(bus, fn, config, size, value);
+ v3_unmap_bus(v3);
+ return ret;
+}
+
+static struct pci_ops v3_pci_ops = {
+ .map_bus = v3_map_bus,
+ .read = v3_pci_read_config,
+ .write = v3_pci_write_config,
+};
+
+static irqreturn_t v3_irq(int irq, void *data)
+{
+ struct v3_pci *v3 = data;
+ struct device *dev = v3->dev;
+ u32 status;
+
+ status = readw(v3->base + V3_PCI_STAT);
+ if (status & V3_PCI_STAT_PAR_ERR)
+ dev_err(dev, "parity error interrupt\n");
+ if (status & V3_PCI_STAT_SYS_ERR)
+ dev_err(dev, "system error interrupt\n");
+ if (status & V3_PCI_STAT_M_ABORT_ERR)
+ dev_err(dev, "master abort error interrupt\n");
+ if (status & V3_PCI_STAT_T_ABORT_ERR)
+ dev_err(dev, "target abort error interrupt\n");
+ writew(status, v3->base + V3_PCI_STAT);
+
+ status = readb(v3->base + V3_LB_ISTAT);
+ if (status & V3_LB_ISTAT_MAILBOX)
+ dev_info(dev, "PCI mailbox interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_RD)
+ dev_err(dev, "PCI target LB->PCI READ abort interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_WR)
+ dev_err(dev, "PCI target LB->PCI WRITE abort interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_INT)
+ dev_info(dev, "PCI pin interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_PERR)
+ dev_err(dev, "PCI parity error interrupt\n");
+ if (status & V3_LB_ISTAT_I2O_QWR)
+ dev_info(dev, "I2O inbound post queue interrupt\n");
+ if (status & V3_LB_ISTAT_DMA1)
+ dev_info(dev, "DMA channel 1 interrupt\n");
+ if (status & V3_LB_ISTAT_DMA0)
+ dev_info(dev, "DMA channel 0 interrupt\n");
+ /* Clear all possible interrupts on the local bus */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ if (v3->map)
+ regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
+ INTEGRATOR_SC_PCI_ENABLE |
+ INTEGRATOR_SC_PCI_INTCLR);
+
+ return IRQ_HANDLED;
+}
+
+static int v3_integrator_init(struct v3_pci *v3)
+{
+ unsigned int val;
+
+ v3->map =
+ syscon_regmap_lookup_by_compatible("arm,integrator-ap-syscon");
+ if (IS_ERR(v3->map)) {
+ dev_err(v3->dev, "no syscon\n");
+ return -ENODEV;
+ }
+
+ regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val);
+ /* Take the PCI bridge out of reset, clear IRQs */
+ regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
+ INTEGRATOR_SC_PCI_ENABLE |
+ INTEGRATOR_SC_PCI_INTCLR);
+
+ if (!(val & INTEGRATOR_SC_PCI_ENABLE)) {
+ /* If we were in reset we need to sleep a bit */
+ msleep(230);
+
+ /* Set the physical base for the controller itself */
+ writel(0x6200, v3->base + V3_LB_IO_BASE);
+
+ /* Wait for the mailbox to settle after reset */
+ do {
+ writeb(0xaa, v3->base + V3_MAIL_DATA);
+ writeb(0x55, v3->base + V3_MAIL_DATA + 4);
+ } while (readb(v3->base + V3_MAIL_DATA) != 0xaa &&
+ readb(v3->base + V3_MAIL_DATA) != 0x55);
+ }
+
+ dev_info(v3->dev, "initialized PCI V3 Integrator/AP integration\n");
+
+ return 0;
+}
+
+static int v3_pci_setup_resource(struct v3_pci *v3,
+ struct pci_host_bridge *host,
+ struct resource_entry *win)
+{
+ struct device *dev = v3->dev;
+ struct resource *mem;
+ struct resource *io;
+
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+
+ /* Setup window 2 - PCI I/O */
+ writel(v3_addr_to_lb_base2(pci_pio_to_address(io->start)) |
+ V3_LB_BASE2_ENABLE,
+ v3->base + V3_LB_BASE2);
+ writew(v3_addr_to_lb_map2(io->start - win->offset),
+ v3->base + V3_LB_MAP2);
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ if (mem->flags & IORESOURCE_PREFETCH) {
+ mem->name = "V3 PCI PRE-MEM";
+ v3->pre_mem = mem->start;
+ v3->pre_bus_addr = mem->start - win->offset;
+ dev_dbg(dev, "PREFETCHABLE MEM window %pR, bus addr %pap\n",
+ mem, &v3->pre_bus_addr);
+ if (resource_size(mem) != SZ_256M) {
+ dev_err(dev, "prefetchable memory range is not 256MB\n");
+ return -EINVAL;
+ }
+ if (v3->non_pre_mem &&
+ (mem->start != v3->non_pre_mem + SZ_256M)) {
+ dev_err(dev,
+ "prefetchable memory is not adjacent to non-prefetchable memory\n");
+ return -EINVAL;
+ }
+ /* Setup window 1 - PCI prefetchable memory */
+ writel(v3_addr_to_lb_base(v3->pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB |
+ V3_LB_BASE_PREFETCH |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(v3_addr_to_lb_map(v3->pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM, /* Was V3_LB_MAP_TYPE_MEM_MULTIPLE */
+ v3->base + V3_LB_MAP1);
+ } else {
+ mem->name = "V3 PCI NON-PRE-MEM";
+ v3->non_pre_mem = mem->start;
+ v3->non_pre_bus_addr = mem->start - win->offset;
+ dev_dbg(dev, "NON-PREFETCHABLE MEM window %pR, bus addr %pap\n",
+ mem, &v3->non_pre_bus_addr);
+ if (resource_size(mem) != SZ_256M) {
+ dev_err(dev,
+ "non-prefetchable memory range is not 256MB\n");
+ return -EINVAL;
+ }
+ /* Setup window 0 - PCI non-prefetchable memory */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+ writew(v3_addr_to_lb_map(v3->non_pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM,
+ v3->base + V3_LB_MAP0);
+ }
+ break;
+ case IORESOURCE_BUS:
+ break;
+ default:
+ dev_info(dev, "Unknown resource type %lu\n",
+ resource_type(win->res));
+ break;
+ }
+
+ return 0;
+}
+
+static int v3_get_dma_range_config(struct v3_pci *v3,
+ struct resource_entry *entry,
+ u32 *pci_base, u32 *pci_map)
+{
+ struct device *dev = v3->dev;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 pci_end = cpu_end - entry->offset;
+ u64 pci_addr = entry->res->start - entry->offset;
+ u32 val;
+
+ if (pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
+ dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n");
+ return -EINVAL;
+ }
+ val = ((u32)pci_addr) & V3_PCI_BASE_M_ADR_BASE;
+ *pci_base = val;
+
+ if (cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
+ dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n");
+ return -EINVAL;
+ }
+ val = ((u32)cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
+
+ switch (resource_size(entry->res)) {
+ case SZ_1M:
+ val |= V3_LB_BASE_ADR_SIZE_1MB;
+ break;
+ case SZ_2M:
+ val |= V3_LB_BASE_ADR_SIZE_2MB;
+ break;
+ case SZ_4M:
+ val |= V3_LB_BASE_ADR_SIZE_4MB;
+ break;
+ case SZ_8M:
+ val |= V3_LB_BASE_ADR_SIZE_8MB;
+ break;
+ case SZ_16M:
+ val |= V3_LB_BASE_ADR_SIZE_16MB;
+ break;
+ case SZ_32M:
+ val |= V3_LB_BASE_ADR_SIZE_32MB;
+ break;
+ case SZ_64M:
+ val |= V3_LB_BASE_ADR_SIZE_64MB;
+ break;
+ case SZ_128M:
+ val |= V3_LB_BASE_ADR_SIZE_128MB;
+ break;
+ case SZ_256M:
+ val |= V3_LB_BASE_ADR_SIZE_256MB;
+ break;
+ case SZ_512M:
+ val |= V3_LB_BASE_ADR_SIZE_512MB;
+ break;
+ case SZ_1G:
+ val |= V3_LB_BASE_ADR_SIZE_1GB;
+ break;
+ case SZ_2G:
+ val |= V3_LB_BASE_ADR_SIZE_2GB;
+ break;
+ default:
+ dev_err(v3->dev, "illegal dma memory chunk size\n");
+ return -EINVAL;
+ }
+ val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE;
+ *pci_map = val;
+
+ dev_dbg(dev,
+ "DMA MEM CPU: 0x%016llx -> 0x%016llx => "
+ "PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n",
+ cpu_addr, cpu_end,
+ pci_addr, pci_end,
+ *pci_base, *pci_map);
+
+ return 0;
+}
+
+static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3,
+ struct device_node *np)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(v3);
+ struct device *dev = v3->dev;
+ struct resource_entry *entry;
+ int i = 0;
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ int ret;
+ u32 pci_base, pci_map;
+
+ ret = v3_get_dma_range_config(v3, entry, &pci_base, &pci_map);
+ if (ret)
+ return ret;
+
+ if (i == 0) {
+ writel(pci_base, v3->base + V3_PCI_BASE0);
+ writel(pci_map, v3->base + V3_PCI_MAP0);
+ } else if (i == 1) {
+ writel(pci_base, v3->base + V3_PCI_BASE1);
+ writel(pci_map, v3->base + V3_PCI_MAP1);
+ } else {
+ dev_err(dev, "too many ranges, only two supported\n");
+ dev_err(dev, "range %d ignored\n", i);
+ }
+ i++;
+ }
+ return 0;
+}
+
+static int v3_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *regs;
+ struct resource_entry *win;
+ struct v3_pci *v3;
+ struct pci_host_bridge *host;
+ struct clk *clk;
+ u16 val;
+ int irq;
+ int ret;
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*v3));
+ if (!host)
+ return -ENOMEM;
+
+ host->ops = &v3_pci_ops;
+ v3 = pci_host_bridge_priv(host);
+ host->sysdata = v3;
+ v3->dev = dev;
+
+ /* Get and enable host clock */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "clock not found\n");
+ return PTR_ERR(clk);
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "unable to enable clock\n");
+ return ret;
+ }
+
+ v3->base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
+ if (IS_ERR(v3->base))
+ return PTR_ERR(v3->base);
+ /*
+ * The hardware has a register with the physical base address
+ * of the V3 controller itself; verify that this is the same
+ * as the physical memory we've remapped it from.
+ */
+ if (readl(v3->base + V3_LB_IO_BASE) != (regs->start >> 16))
+ dev_err(dev, "V3_LB_IO_BASE = %08x but device is @%pR\n",
+ readl(v3->base + V3_LB_IO_BASE), regs);
+
+ /* Configuration space is 16MB directly mapped */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!regs || resource_size(regs) != SZ_16M) {
+ dev_err(dev, "config mem is not 16MB!\n");
+ return -EINVAL;
+ }
+ v3->config_mem = regs->start;
+ v3->config_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(v3->config_base))
+ return PTR_ERR(v3->config_base);
+
+ /* Get and request error IRQ resource */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, v3_irq, 0,
+ "PCIv3 error", v3);
+ if (ret < 0) {
+ dev_err(dev,
+ "unable to request PCIv3 error IRQ %d (%d)\n",
+ irq, ret);
+ return ret;
+ }
+
+ /*
+ * Unlock V3 registers, but only if they were previously locked.
+ */
+ if (readw(v3->base + V3_SYSTEM) & V3_SYSTEM_M_LOCK)
+ writew(V3_SYSTEM_UNLOCK, v3->base + V3_SYSTEM);
+
+ /* Disable all slave access while we set up the windows */
+ val = readw(v3->base + V3_PCI_CMD);
+ val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Put the PCI bus into reset */
+ val = readw(v3->base + V3_SYSTEM);
+ val &= ~V3_SYSTEM_M_RST_OUT;
+ writew(val, v3->base + V3_SYSTEM);
+
+ /* Retry until we're ready */
+ val = readw(v3->base + V3_PCI_CFG);
+ val |= V3_PCI_CFG_M_RETRY_EN;
+ writew(val, v3->base + V3_PCI_CFG);
+
+ /* Set up the local bus protocol */
+ val = readw(v3->base + V3_LB_CFG);
+ val |= V3_LB_CFG_LB_BE_IMODE; /* Byte enable input */
+ val |= V3_LB_CFG_LB_BE_OMODE; /* Byte enable output */
+ val &= ~V3_LB_CFG_LB_ENDIAN; /* Little endian */
+ val &= ~V3_LB_CFG_LB_PPC_RDY; /* TODO: when using on PPC403Gx, set to 1 */
+ writew(val, v3->base + V3_LB_CFG);
+
+ /* Enable the PCI bus master */
+ val = readw(v3->base + V3_PCI_CMD);
+ val |= PCI_COMMAND_MASTER;
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry(win, &host->windows) {
+ ret = v3_pci_setup_resource(v3, host, win);
+ if (ret) {
+ dev_err(dev, "error setting up resources\n");
+ return ret;
+ }
+ }
+ ret = v3_pci_parse_map_dma_ranges(v3, np);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable PCI to host IO cycles, enable I/O buffers @3.3V,
+ * set AD_LOW0 to 1 if one of the LB_MAP registers chooses
+ * to use this (should be unused).
+ */
+ writel(0x00000000, v3->base + V3_PCI_IO_BASE);
+ val = V3_PCI_CFG_M_IO_REG_DIS | V3_PCI_CFG_M_IO_DIS |
+ V3_PCI_CFG_M_EN3V | V3_PCI_CFG_M_AD_LOW0;
+ /*
+ * Command types used for DMA reads and writes from the PCI bus
+ */
+ val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_RTYPE_SHIFT;
+ val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_WTYPE_SHIFT;
+ writew(val, v3->base + V3_PCI_CFG);
+
+ /*
+ * Set the V3 FIFO such that writes have higher priority than
+ * reads, and a local bus write causes a local bus read FIFO flush
+ * on aperture 1. The same applies on the PCI side.
+ */
+ writew(V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 |
+ V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 |
+ V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 |
+ V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1,
+ v3->base + V3_FIFO_PRIORITY);
+
+ /*
+ * Clear any error interrupts, and enable parity and write error
+ * interrupts
+ */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ val = readw(v3->base + V3_LB_CFG);
+ val |= V3_LB_CFG_LB_LB_INT;
+ writew(val, v3->base + V3_LB_CFG);
+ writeb(V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR,
+ v3->base + V3_LB_IMASK);
+
+ /* Special Integrator initialization */
+ if (of_device_is_compatible(np, "arm,integrator-ap-pci")) {
+ ret = v3_integrator_init(v3);
+ if (ret)
+ return ret;
+ }
+
+ /* Post-init: enable PCI memory and invalidate (master already on) */
+ val = readw(v3->base + V3_PCI_CMD);
+ val |= PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE;
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Clear pending interrupts */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ /* Read or write errors and parity errors cause interrupts */
+ writeb(V3_LB_ISTAT_PCI_RD | V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR,
+ v3->base + V3_LB_IMASK);
+
+ /* Take the PCI bus out of reset so devices can initialize */
+ val = readw(v3->base + V3_SYSTEM);
+ val |= V3_SYSTEM_M_RST_OUT;
+ writew(val, v3->base + V3_SYSTEM);
+
+ /*
+ * Re-lock the system register.
+ */
+ val = readw(v3->base + V3_SYSTEM);
+ val |= V3_SYSTEM_M_LOCK;
+ writew(val, v3->base + V3_SYSTEM);
+
+ return pci_host_probe(host);
+}
+
+static const struct of_device_id v3_pci_of_match[] = {
+ {
+ .compatible = "v3,v360epc-pci",
+ },
+ {},
+};
+
+static struct platform_driver v3_pci_driver = {
+ .driver = {
+ .name = "pci-v3-semi",
+ .of_match_table = v3_pci_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = v3_pci_probe,
+};
+builtin_platform_driver(v3_pci_driver);
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
new file mode 100644
index 000000000..e9a6758fe
--- /dev/null
+++ b/drivers/pci/controller/pci-versatile.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2004 Koninklijke Philips Electronics NV
+ *
+ * Conversion to platform driver and DT:
+ * Copyright 2014 Linaro Ltd.
+ *
+ * 14/04/2005 Initial version, colin.king@philips.com
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "../pci.h"
+
+static void __iomem *versatile_pci_base;
+static void __iomem *versatile_cfg_base[2];
+
+#define PCI_IMAP(m) (versatile_pci_base + ((m) * 4))
+#define PCI_SMAP(m) (versatile_pci_base + 0x14 + ((m) * 4))
+#define PCI_SELFID (versatile_pci_base + 0xc)
+
+#define VP_PCI_DEVICE_ID 0x030010ee
+#define VP_PCI_CLASS_ID 0x0b400000
+
+static u32 pci_slot_ignore;
+
+static int __init versatile_pci_slot_ignore(char *str)
+{
+ int slot;
+
+ while (get_option(&str, &slot)) {
+ if ((slot < 0) || (slot > 31))
+ pr_err("Illegal slot value: %d\n", slot);
+ else
+ pci_slot_ignore |= (1 << slot);
+ }
+ return 1;
+}
+__setup("pci_slot_ignore=", versatile_pci_slot_ignore);
+
+static void __iomem *versatile_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int offset)
+{
+ unsigned int busnr = bus->number;
+
+ if (pci_slot_ignore & (1 << PCI_SLOT(devfn)))
+ return NULL;
+
+ return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset);
+}
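+
+/*
+ * For instance, a config access to bus 0, device 10, function 0 at
+ * offset 0 maps to versatile_cfg_base[1] + (PCI_DEVFN(10, 0) << 8) =
+ * versatile_cfg_base[1] + 0x5000.
+ */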
+
+static struct pci_ops pci_versatile_ops = {
+ .map_bus = versatile_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write,
+};
+
+static int versatile_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct resource_entry *entry;
+ int i, myslot = -1, mem = 1;
+ u32 val;
+ void __iomem *local_pci_cfg_base;
+ struct pci_host_bridge *bridge;
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ versatile_pci_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(versatile_pci_base))
+ return PTR_ERR(versatile_pci_base);
+
+ versatile_cfg_base[0] = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(versatile_cfg_base[0]))
+ return PTR_ERR(versatile_cfg_base[0]);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(versatile_cfg_base[1]))
+ return PTR_ERR(versatile_cfg_base[1]);
+
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ writel(entry->res->start >> 28, PCI_IMAP(mem));
+ writel(__pa(PAGE_OFFSET) >> 28, PCI_SMAP(mem));
+ mem++;
+ }
+ }
+
+ /*
+ * We need to discover the PCI core first so it can configure
+ * itself before the main PCI probing is performed.
+ */
+ for (i = 0; i < 32; i++) {
+ if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) &&
+ (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) {
+ myslot = i;
+ break;
+ }
+ }
+ if (myslot == -1) {
+ dev_err(dev, "Cannot find PCI core!\n");
+ return -EIO;
+ }
+ /*
+ * Do not map the Versatile FPGA PCI device into memory space
+ */
+ pci_slot_ignore |= (1 << myslot);
+
+ dev_info(dev, "PCI core found (slot %d)\n", myslot);
+
+ writel(myslot, PCI_SELFID);
+ local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
+
+ val = readl(local_pci_cfg_base + PCI_COMMAND);
+ val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
+ writel(val, local_pci_cfg_base + PCI_COMMAND);
+
+ /*
+ * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
+ */
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_0);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_1);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+
+ /*
+ * For many years the kernel and QEMU were symbiotically buggy
+ * in that they both assumed the same broken IRQ mapping.
+ * QEMU therefore attempts to auto-detect old broken kernels
+ * so that they still work on newer QEMU as they did on old
+ * QEMU. Since we now use the correct (i.e. matching-hardware)
+ * IRQ mapping we write a definitely different value to a
+ * PCI_INTERRUPT_LINE register to tell QEMU that we expect
+ * real hardware behaviour and it need not be backwards
+ * compatible for us. This write is harmless on real hardware.
+ */
+ writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE);
+
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
+
+ bridge->ops = &pci_versatile_ops;
+
+ return pci_host_probe(bridge);
+}
+
+static const struct of_device_id versatile_pci_of_match[] = {
+ { .compatible = "arm,versatile-pci", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, versatile_pci_of_match);
+
+static struct platform_driver versatile_pci_driver = {
+ .driver = {
+ .name = "versatile-pci",
+ .of_match_table = versatile_pci_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = versatile_pci_probe,
+};
+module_platform_driver(versatile_pci_driver);
+
+MODULE_DESCRIPTION("Versatile PCI driver");
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
new file mode 100644
index 000000000..3ce38dfd0
--- /dev/null
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * APM X-Gene MSI Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Tanmay Inamdar <tinamdar@apm.com>
+ * Duc Dang <dhdang@apm.com>
+ */
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/of_pci.h>
+
+#define MSI_IR0 0x000000
+#define MSI_INT0 0x800000
+#define IDX_PER_GROUP 8
+#define IRQS_PER_IDX 16
+#define NR_HW_IRQS 16
+#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
+
+struct xgene_msi_group {
+ struct xgene_msi *msi;
+ int gic_irq;
+ u32 msi_grp;
+};
+
+struct xgene_msi {
+ struct device_node *node;
+ struct irq_domain *inner_domain;
+ struct irq_domain *msi_domain;
+ u64 msi_addr;
+ void __iomem *msi_regs;
+ unsigned long *bitmap;
+ struct mutex bitmap_lock;
+ struct xgene_msi_group *msi_groups;
+ int num_cpus;
+};
+
+/* Global data */
+static struct xgene_msi xgene_msi_ctrl;
+
+static struct irq_chip xgene_msi_top_irq_chip = {
+ .name = "X-Gene1 MSI",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info xgene_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &xgene_msi_top_irq_chip,
+};
+
+/*
+ * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
+ * n is group number (0..F), x is index of registers in each group (0..7)
+ * The register layout is as follows:
+ * MSI0IR0 base_addr
+ * MSI0IR1 base_addr + 0x10000
+ * ... ...
+ * MSI0IR6 base_addr + 0x60000
+ * MSI0IR7 base_addr + 0x70000
+ * MSI1IR0 base_addr + 0x80000
+ * MSI1IR1 base_addr + 0x90000
+ * ... ...
+ * MSI1IR7 base_addr + 0xF0000
+ * MSI2IR0 base_addr + 0x100000
+ * ... ...
+ * MSIFIR0 base_addr + 0x780000
+ * MSIFIR1 base_addr + 0x790000
+ * ... ...
+ * MSIFIR7 base_addr + 0x7F0000
+ * MSIINT0 base_addr + 0x800000
+ * MSIINT1 base_addr + 0x810000
+ * ... ...
+ * MSIINTF base_addr + 0x8F0000
+ *
+ * Each index register supports 16 MSI vectors (0..15) that generate
+ * interrupts. A total of 16 GIC IRQs are assigned to these 16 groups of
+ * MSI termination registers.
+ *
+ * Each MSI termination group has one MSIINTn register (n is 0..15) that
+ * indicates the MSI pending status caused by any of its 8 index registers.
+ */
+
+/* MSInIRx read helper */
+static u32 xgene_msi_ir_read(struct xgene_msi *msi,
+ u32 msi_grp, u32 msir_idx)
+{
+ return readl_relaxed(msi->msi_regs + MSI_IR0 +
+ (msi_grp << 19) + (msir_idx << 16));
+}
+
+/* MSIINTn read helper */
+static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
+{
+ return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
+}
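+
+/*
+ * Illustrative worked example: with msi_grp = 3 and msir_idx = 5,
+ * xgene_msi_ir_read() computes (3 << 19) + (5 << 16) = 0x1d0000,
+ * i.e. MSI3IR5 in the layout above; xgene_msi_int_read() with
+ * msi_grp = 0xF computes 0x800000 + (0xF << 16) = 0x8f0000,
+ * i.e. MSIINTF.
+ */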
+
+/*
+ * With 2048 MSI vectors supported, the MSI message can be constructed using
+ * following scheme:
+ * - Divide into 8 256-vector groups
+ * Group 0: 0-255
+ * Group 1: 256-511
+ * Group 2: 512-767
+ * ...
+ * Group 7: 1792-2047
+ * - Each 256-vector group is divided into 16 16-vector groups
+ * As an example, the 16 16-vector groups for 256-vector group 0-255 are:
+ * Group 0: 0-15
+ * Group 1: 16-31
+ * ...
+ * Group 15: 240-255
+ * - The termination address of MSI vector in 256-vector group n and 16-vector
+ * group x is the address of MSIxIRn
+ * - The data for MSI vector in 16-vector group x is x
+ */
+static u32 hwirq_to_reg_set(unsigned long hwirq)
+{
+ return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
+}
+
+static u32 hwirq_to_group(unsigned long hwirq)
+{
+ return (hwirq % NR_HW_IRQS);
+}
+
+static u32 hwirq_to_msi_data(unsigned long hwirq)
+{
+ return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
+}
+
+static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
+ u32 reg_set = hwirq_to_reg_set(data->hwirq);
+ u32 group = hwirq_to_group(data->hwirq);
+ u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
+
+ msg->address_hi = upper_32_bits(target_addr);
+ msg->address_lo = lower_32_bits(target_addr);
+ msg->data = hwirq_to_msi_data(data->hwirq);
+}
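+
+/*
+ * Illustrative worked example: for hwirq 277, reg_set = 277 / 256 = 1,
+ * group = 277 % 16 = 5 and msi_data = (277 / 16) % 16 = 1, so the
+ * message targets msi_addr + ((8 * 5 + 1) << 16) = msi_addr + 0x290000
+ * (i.e. MSI5IR1) with data 1.
+ */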
+
+/*
+ * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
+ * the expected behaviour of .set_affinity for each MSI interrupt, the 16
+ * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
+ * for each core). The MSI vector is moved from one MSI GIC IRQ to another
+ * MSI GIC IRQ to steer its MSI interrupt to the correct X-Gene v1 core. As a
+ * consequence, the total number of MSI vectors that X-Gene v1 supports is
+ * reduced to 256 (2048/8) vectors.
+ */
+static int hwirq_to_cpu(unsigned long hwirq)
+{
+ return (hwirq % xgene_msi_ctrl.num_cpus);
+}
+
+static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
+{
+ return (hwirq - hwirq_to_cpu(hwirq));
+}
+
+static int xgene_msi_set_affinity(struct irq_data *irqdata,
+ const struct cpumask *mask, bool force)
+{
+ int target_cpu = cpumask_first(mask);
+ int curr_cpu;
+
+ curr_cpu = hwirq_to_cpu(irqdata->hwirq);
+ if (curr_cpu == target_cpu)
+ return IRQ_SET_MASK_OK_DONE;
+
+ /* Update MSI number to target the new CPU */
+ irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;
+
+ return IRQ_SET_MASK_OK;
+}
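+
+/*
+ * Illustrative worked example (assuming num_cpus = 8): hwirq 277 is
+ * currently steered to CPU 277 % 8 = 5; retargeting it to CPU 2
+ * rewrites the hwirq to (277 - 5) + 2 = 274, whose termination group
+ * (274 % 16 = 2) is served by a GIC IRQ affine to CPU 2. The MSI
+ * message is then recomposed for the new hwirq.
+ */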
+
+static struct irq_chip xgene_msi_bottom_irq_chip = {
+ .name = "MSI",
+ .irq_set_affinity = xgene_msi_set_affinity,
+ .irq_compose_msi_msg = xgene_compose_msi_msg,
+};
+
+static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct xgene_msi *msi = domain->host_data;
+ int msi_irq;
+
+ mutex_lock(&msi->bitmap_lock);
+
+ msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
+ msi->num_cpus, 0);
+ if (msi_irq < NR_MSI_VEC)
+ bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
+ else
+ msi_irq = -ENOSPC;
+
+ mutex_unlock(&msi->bitmap_lock);
+
+ if (msi_irq < 0)
+ return msi_irq;
+
+ irq_domain_set_info(domain, virq, msi_irq,
+ &xgene_msi_bottom_irq_chip, domain->host_data,
+ handle_simple_irq, NULL, NULL);
+
+ return 0;
+}
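+
+/*
+ * Note that each allocation claims num_cpus consecutive bits of the
+ * bitmap (e.g. bits 0..7 on an 8-core system) and returns the first
+ * bit as the canonical hwirq. This reserves one hwirq per CPU for the
+ * vector, which is what allows xgene_msi_set_affinity() to retarget
+ * it by simply adding the target CPU to the canonical hwirq.
+ */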
+
+static void xgene_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
+ u32 hwirq;
+
+ mutex_lock(&msi->bitmap_lock);
+
+ hwirq = hwirq_to_canonical_hwirq(d->hwirq);
+ bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
+
+ mutex_unlock(&msi->bitmap_lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = xgene_irq_domain_alloc,
+ .free = xgene_irq_domain_free,
+};
+
+static int xgene_allocate_domains(struct xgene_msi *msi)
+{
+ msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
+ &msi_domain_ops, msi);
+ if (!msi->inner_domain)
+ return -ENOMEM;
+
+ msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
+ &xgene_msi_domain_info,
+ msi->inner_domain);
+
+ if (!msi->msi_domain) {
+ irq_domain_remove(msi->inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void xgene_free_domains(struct xgene_msi *msi)
+{
+ if (msi->msi_domain)
+ irq_domain_remove(msi->msi_domain);
+ if (msi->inner_domain)
+ irq_domain_remove(msi->inner_domain);
+}
+
+static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
+{
+ xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL);
+ if (!xgene_msi->bitmap)
+ return -ENOMEM;
+
+ mutex_init(&xgene_msi->bitmap_lock);
+
+ xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
+ sizeof(struct xgene_msi_group),
+ GFP_KERNEL);
+ if (!xgene_msi->msi_groups)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void xgene_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xgene_msi_group *msi_groups;
+ struct xgene_msi *xgene_msi;
+ int msir_index, msir_val, hw_irq, ret;
+ u32 intr_index, grp_select, msi_grp;
+
+ chained_irq_enter(chip, desc);
+
+ msi_groups = irq_desc_get_handler_data(desc);
+ xgene_msi = msi_groups->msi;
+ msi_grp = msi_groups->msi_grp;
+
+ /*
+	 * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt.
+	 * If bit x of this register is set (x is 0..7), one or more interrupts
+	 * corresponding to MSInIRx are pending.
+ */
+ grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
+ while (grp_select) {
+ msir_index = ffs(grp_select) - 1;
+ /*
+ * Calculate MSInIRx address to read to check for interrupts
+ * (refer to termination address and data assignment
+		 * described in xgene_compose_msi_msg())
+ */
+ msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
+ while (msir_val) {
+ intr_index = ffs(msir_val) - 1;
+ /*
+ * Calculate MSI vector number (refer to the termination
+ * address and data assignment described in
+ * xgene_compose_msi_msg function)
+ */
+ hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
+ NR_HW_IRQS) + msi_grp;
+ /*
+			 * As we have multiple hw_irqs that map to a single MSI,
+			 * always look up the virq using the hw_irq as seen from
+			 * CPU0.
+ */
+ hw_irq = hwirq_to_canonical_hwirq(hw_irq);
+ ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq);
+ WARN_ON_ONCE(ret);
+ msir_val &= ~(1 << intr_index);
+ }
+ grp_select &= ~(1 << msir_index);
+
+ if (!grp_select) {
+ /*
+			 * We handled all interrupts in this group; resample
+			 * this group's MSIINTn register in case something
+			 * else has been made pending in the meantime.
+ */
+ grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
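+
+/*
+ * Illustrative decode example (assuming an 8-core system): for
+ * msi_grp = 5 with bit 1 set in both MSIINT5 (msir_index = 1) and
+ * MSI5IR1 (intr_index = 1), the handler computes
+ * hw_irq = ((1 * 16) + 1) * 16 + 5 = 277, the inverse of the mapping
+ * in xgene_compose_msi_msg(), and canonicalizes it to 272 before the
+ * domain lookup.
+ */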
+
+static enum cpuhp_state pci_xgene_online;
+
+static void xgene_msi_remove(struct platform_device *pdev)
+{
+ struct xgene_msi *msi = platform_get_drvdata(pdev);
+
+ if (pci_xgene_online)
+ cpuhp_remove_state(pci_xgene_online);
+ cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);
+
+ kfree(msi->msi_groups);
+
+ bitmap_free(msi->bitmap);
+ msi->bitmap = NULL;
+
+ xgene_free_domains(msi);
+}
+
+static int xgene_msi_hwirq_alloc(unsigned int cpu)
+{
+ struct xgene_msi *msi = &xgene_msi_ctrl;
+ struct xgene_msi_group *msi_group;
+ cpumask_var_t mask;
+ int i;
+ int err;
+
+ for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
+ msi_group = &msi->msi_groups[i];
+ if (!msi_group->gic_irq)
+ continue;
+
+ irq_set_chained_handler_and_data(msi_group->gic_irq,
+ xgene_msi_isr, msi_group);
+
+ /*
+ * Statically allocate MSI GIC IRQs to each CPU core.
+ * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
+ * to each core.
+ */
+ if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ cpumask_clear(mask);
+ cpumask_set_cpu(cpu, mask);
+ err = irq_set_affinity(msi_group->gic_irq, mask);
+ if (err)
+				pr_err("failed to set affinity for GIC IRQ\n");
+ free_cpumask_var(mask);
+ } else {
+ pr_err("failed to alloc CPU mask for affinity\n");
+ err = -EINVAL;
+ }
+
+ if (err) {
+ irq_set_chained_handler_and_data(msi_group->gic_irq,
+ NULL, NULL);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int xgene_msi_hwirq_free(unsigned int cpu)
+{
+ struct xgene_msi *msi = &xgene_msi_ctrl;
+ struct xgene_msi_group *msi_group;
+ int i;
+
+ for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
+ msi_group = &msi->msi_groups[i];
+ if (!msi_group->gic_irq)
+ continue;
+
+ irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
+ NULL);
+ }
+ return 0;
+}
+
+static const struct of_device_id xgene_msi_match_table[] = {
+ {.compatible = "apm,xgene1-msi"},
+ {},
+};
+
+static int xgene_msi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int rc, irq_index;
+ struct xgene_msi *xgene_msi;
+ int virt_msir;
+ u32 msi_val, msi_idx;
+
+ xgene_msi = &xgene_msi_ctrl;
+
+ platform_set_drvdata(pdev, xgene_msi);
+
+ xgene_msi->msi_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(xgene_msi->msi_regs)) {
+ rc = PTR_ERR(xgene_msi->msi_regs);
+ goto error;
+ }
+ xgene_msi->msi_addr = res->start;
+ xgene_msi->node = pdev->dev.of_node;
+ xgene_msi->num_cpus = num_possible_cpus();
+
+ rc = xgene_msi_init_allocator(xgene_msi);
+ if (rc) {
+ dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
+ goto error;
+ }
+
+ rc = xgene_allocate_domains(xgene_msi);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
+ goto error;
+ }
+
+ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
+ virt_msir = platform_get_irq(pdev, irq_index);
+ if (virt_msir < 0) {
+ rc = virt_msir;
+ goto error;
+ }
+ xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
+ xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
+ xgene_msi->msi_groups[irq_index].msi = xgene_msi;
+ }
+
+ /*
+ * MSInIRx registers are read-to-clear; before registering
+ * interrupt handlers, read all of them to clear spurious
+ * interrupts that may occur before the driver is probed.
+ */
+ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
+ for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
+ xgene_msi_ir_read(xgene_msi, irq_index, msi_idx);
+
+ /* Read MSIINTn to confirm */
+ msi_val = xgene_msi_int_read(xgene_msi, irq_index);
+ if (msi_val) {
+ dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
+ rc = -EINVAL;
+ goto error;
+ }
+ }
+
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
+ xgene_msi_hwirq_alloc, NULL);
+ if (rc < 0)
+ goto err_cpuhp;
+ pci_xgene_online = rc;
+ rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
+ xgene_msi_hwirq_free);
+ if (rc)
+ goto err_cpuhp;
+
+ dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
+
+ return 0;
+
+err_cpuhp:
+ dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
+error:
+ xgene_msi_remove(pdev);
+ return rc;
+}
+
+static struct platform_driver xgene_msi_driver = {
+ .driver = {
+ .name = "xgene-msi",
+ .of_match_table = xgene_msi_match_table,
+ },
+ .probe = xgene_msi_probe,
+ .remove_new = xgene_msi_remove,
+};
+
+static int __init xgene_pcie_msi_init(void)
+{
+ return platform_driver_register(&xgene_msi_driver);
+}
+subsys_initcall(xgene_pcie_msi_init);
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
new file mode 100644
index 000000000..887b4941f
--- /dev/null
+++ b/drivers/pci/controller/pci-xgene.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * APM X-Gene PCIe Driver
+ *
+ * Copyright (c) 2014 Applied Micro Circuits Corporation.
+ *
+ * Author: Tanmay Inamdar <tinamdar@apm.com>.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/memblock.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "../pci.h"
+
+#define PCIECORE_CTLANDSTATUS 0x50
+#define PIM1_1L 0x80
+#define IBAR2 0x98
+#define IR2MSK 0x9c
+#define PIM2_1L 0xa0
+#define IBAR3L 0xb4
+#define IR3MSKL 0xbc
+#define PIM3_1L 0xc4
+#define OMR1BARL 0x100
+#define OMR2BARL 0x118
+#define OMR3BARL 0x130
+#define CFGBARL 0x154
+#define CFGBARH 0x158
+#define CFGCTL 0x15c
+#define RTDID 0x160
+#define BRIDGE_CFG_0 0x2000
+#define BRIDGE_CFG_4 0x2010
+#define BRIDGE_STATUS_0 0x2600
+
+#define LINK_UP_MASK 0x00000100
+#define AXI_EP_CFG_ACCESS 0x10000
+#define EN_COHERENCY 0xF0000000
+#define EN_REG 0x00000001
+#define OB_LO_IO 0x00000002
+#define XGENE_PCIE_DEVICEID 0xE004
+#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe)
+
+#define XGENE_V1_PCI_EXP_CAP 0x40
+
+/* PCIe IP version */
+#define XGENE_PCIE_IP_VER_UNKN 0
+#define XGENE_PCIE_IP_VER_1 1
+#define XGENE_PCIE_IP_VER_2 2
+
+#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+struct xgene_pcie {
+ struct device_node *node;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *csr_base;
+ void __iomem *cfg_base;
+ unsigned long cfg_addr;
+ bool link_up;
+ u32 version;
+};
+
+static u32 xgene_pcie_readl(struct xgene_pcie *port, u32 reg)
+{
+ return readl(port->csr_base + reg);
+}
+
+static void xgene_pcie_writel(struct xgene_pcie *port, u32 reg, u32 val)
+{
+ writel(val, port->csr_base + reg);
+}
+
+static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
+{
+ return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
+}
+
+static inline struct xgene_pcie *pcie_bus_to_port(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg;
+
+ if (acpi_disabled)
+ return (struct xgene_pcie *)(bus->sysdata);
+
+ cfg = bus->sysdata;
+ return (struct xgene_pcie *)(cfg->priv);
+}
+
+/*
+ * When address bits [17:16] are 2'b01, the Configuration access will be
+ * treated as Type 1 and will be forwarded to the external PCIe device.
+ */
+static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
+{
+ struct xgene_pcie *port = pcie_bus_to_port(bus);
+
+ if (bus->number >= (bus->primary + 1))
+ return port->cfg_base + AXI_EP_CFG_ACCESS;
+
+ return port->cfg_base;
+}
+
+/*
+ * For a Configuration request, the RTDID register supplies the Bus Number,
+ * Device Number and Function Number of the header fields.
+ */
+static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
+{
+ struct xgene_pcie *port = pcie_bus_to_port(bus);
+ unsigned int b, d, f;
+ u32 rtdid_val = 0;
+
+ b = bus->number;
+ d = PCI_SLOT(devfn);
+ f = PCI_FUNC(devfn);
+
+ if (!pci_is_root_bus(bus))
+ rtdid_val = (b << 8) | (d << 3) | f;
+
+ xgene_pcie_writel(port, RTDID, rtdid_val);
+ /* read the register back to ensure flush */
+ xgene_pcie_readl(port, RTDID);
+}
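+
+/*
+ * Illustrative worked example: a config access to bus 1, device 2,
+ * function 3 programs RTDID with (1 << 8) | (2 << 3) | 3 = 0x113,
+ * while accesses to the root bus leave RTDID at 0.
+ */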
+
+/*
+ * The X-Gene PCIe port uses BAR0-BAR1 of the RC's configuration space for
+ * the translation from the PCI bus to the native bus. The entire DDR region
+ * is mapped into PCIe space using these registers, so that it can be
+ * reached by DMA from EP devices. BAR0/1 of the bridge must be hidden
+ * during enumeration to avoid sizing and resource allocation by the
+ * PCI core.
+ */
+static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
+{
+ if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) ||
+ (offset == PCI_BASE_ADDRESS_1)))
+ return true;
+
+ return false;
+}
+
+static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ if ((pci_is_root_bus(bus) && devfn != 0) ||
+ xgene_pcie_hide_rc_bars(bus, offset))
+ return NULL;
+
+ xgene_pcie_set_rtdid_reg(bus, devfn);
+ return xgene_pcie_get_cfg_base(bus) + offset;
+}
+
+static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct xgene_pcie *port = pcie_bus_to_port(bus);
+
+ if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
+ PCIBIOS_SUCCESSFUL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * The v1 controller has a bug in its Configuration Request Retry
+ * Status (CRS) logic: when CRS Software Visibility is enabled and
+ * we read the Vendor and Device ID of a non-existent device, the
+ * controller fabricates return data of 0xFFFF0001 ("device exists
+ * but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
+ * ("device does not exist"). This causes the PCI core to retry
+ * the read until it times out. Avoid this by not claiming to
+ * support CRS SV.
+ */
+ if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
+ ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
+ *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+#endif
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+static int xgene_get_csr_resource(struct acpi_device *adev,
+ struct resource *res)
+{
+ struct device *dev = &adev->dev;
+ struct resource_entry *entry;
+ struct list_head list;
+ unsigned long flags;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+ flags = IORESOURCE_MEM;
+ ret = acpi_dev_get_resources(adev, &list,
+ acpi_dev_filter_resource_type_cb,
+ (void *) flags);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse _CRS method, error code %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ret == 0) {
+ dev_err(dev, "no IO and memory resources present in _CRS\n");
+ return -EINVAL;
+ }
+
+ entry = list_first_entry(&list, struct resource_entry, node);
+ *res = *entry->res;
+ acpi_dev_free_resource_list(&list);
+ return 0;
+}
+
+static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct xgene_pcie *port;
+ struct resource csr;
+ int ret;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ret = xgene_get_csr_resource(adev, &csr);
+ if (ret) {
+ dev_err(dev, "can't get CSR resource\n");
+ return ret;
+ }
+ port->csr_base = devm_pci_remap_cfg_resource(dev, &csr);
+ if (IS_ERR(port->csr_base))
+ return PTR_ERR(port->csr_base);
+
+ port->cfg_base = cfg->win;
+ port->version = ipversion;
+
+ cfg->priv = port;
+ return 0;
+}
+
+static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
+{
+ return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
+}
+
+const struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
+ .init = xgene_v1_pcie_ecam_init,
+ .pci_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write,
+ }
+};
+
+static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
+{
+ return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
+}
+
+const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
+ .init = xgene_v2_pcie_ecam_init,
+ .pci_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write,
+ }
+};
+#endif
+
+#if defined(CONFIG_PCI_XGENE)
+static u64 xgene_pcie_set_ib_mask(struct xgene_pcie *port, u32 addr,
+ u32 flags, u64 size)
+{
+ u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
+ u32 val32 = 0;
+ u32 val;
+
+ val32 = xgene_pcie_readl(port, addr);
+ val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
+ xgene_pcie_writel(port, addr, val);
+
+ val32 = xgene_pcie_readl(port, addr + 0x04);
+ val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
+ xgene_pcie_writel(port, addr + 0x04, val);
+
+ val32 = xgene_pcie_readl(port, addr + 0x04);
+ val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
+ xgene_pcie_writel(port, addr + 0x04, val);
+
+ val32 = xgene_pcie_readl(port, addr + 0x08);
+ val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
+ xgene_pcie_writel(port, addr + 0x08, val);
+
+ return mask;
+}
+
+static void xgene_pcie_linkup(struct xgene_pcie *port,
+ u32 *lanes, u32 *speed)
+{
+ u32 val32;
+
+ port->link_up = false;
+ val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
+ if (val32 & LINK_UP_MASK) {
+ port->link_up = true;
+ *speed = PIPE_PHY_RATE_RD(val32);
+ val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
+ *lanes = val32 >> 26;
+ }
+}
+
+static int xgene_pcie_init_port(struct xgene_pcie *port)
+{
+ struct device *dev = port->dev;
+ int rc;
+
+ port->clk = clk_get(dev, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(dev, "clock not available\n");
+ return -ENODEV;
+ }
+
+ rc = clk_prepare_enable(port->clk);
+ if (rc) {
+ dev_err(dev, "clock enable failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int xgene_pcie_map_reg(struct xgene_pcie *port,
+ struct platform_device *pdev)
+{
+ struct device *dev = port->dev;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
+ port->csr_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(port->csr_base))
+ return PTR_ERR(port->csr_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ port->cfg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->cfg_base))
+ return PTR_ERR(port->cfg_base);
+ port->cfg_addr = res->start;
+
+ return 0;
+}
+
+static void xgene_pcie_setup_ob_reg(struct xgene_pcie *port,
+ struct resource *res, u32 offset,
+ u64 cpu_addr, u64 pci_addr)
+{
+ struct device *dev = port->dev;
+ resource_size_t size = resource_size(res);
+ u64 restype = resource_type(res);
+ u64 mask = 0;
+ u32 min_size;
+ u32 flag = EN_REG;
+
+ if (restype == IORESOURCE_MEM) {
+ min_size = SZ_128M;
+ } else {
+ min_size = 128;
+ flag |= OB_LO_IO;
+ }
+
+ if (size >= min_size)
+ mask = ~(size - 1) | flag;
+ else
+ dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
+ (u64)size, min_size);
+
+ xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
+ xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
+ xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
+ xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
+ xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
+ xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
+}
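+
+/*
+ * Illustrative worked example: a 256 MB non-prefetchable MEM window
+ * yields mask = ~(SZ_256M - 1) | EN_REG = 0xfffffffff0000001, whose
+ * low and high halves are programmed at offset + 0x08 and
+ * offset + 0x0c respectively.
+ */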
+
+static void xgene_pcie_setup_cfg_reg(struct xgene_pcie *port)
+{
+ u64 addr = port->cfg_addr;
+
+ xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
+ xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
+ xgene_pcie_writel(port, CFGCTL, EN_REG);
+}
+
+static int xgene_pcie_map_ranges(struct xgene_pcie *port)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
+ struct resource_entry *window;
+ struct device *dev = port->dev;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ struct resource *res = window->res;
+ u64 restype = resource_type(res);
+
+ dev_dbg(dev, "%pR\n", res);
+
+ switch (restype) {
+ case IORESOURCE_IO:
+ xgene_pcie_setup_ob_reg(port, res, OMR3BARL,
+ pci_pio_to_address(res->start),
+ res->start - window->offset);
+ break;
+ case IORESOURCE_MEM:
+ if (res->flags & IORESOURCE_PREFETCH)
+ xgene_pcie_setup_ob_reg(port, res, OMR2BARL,
+ res->start,
+ res->start -
+ window->offset);
+ else
+ xgene_pcie_setup_ob_reg(port, res, OMR1BARL,
+ res->start,
+ res->start -
+ window->offset);
+ break;
+ case IORESOURCE_BUS:
+ break;
+ default:
+ dev_err(dev, "invalid resource %pR\n", res);
+ return -EINVAL;
+ }
+ }
+ xgene_pcie_setup_cfg_reg(port);
+ return 0;
+}
+
+static void xgene_pcie_setup_pims(struct xgene_pcie *port, u32 pim_reg,
+ u64 pim, u64 size)
+{
+ xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
+ xgene_pcie_writel(port, pim_reg + 0x04,
+ upper_32_bits(pim) | EN_COHERENCY);
+ xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
+ xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
+}
+
+/*
+ * X-Gene PCIe supports a maximum of 3 inbound memory regions.
+ * This function selects a region based on the size of the range.
+ */
+static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
+{
+ if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) {
+ *ib_reg_mask |= (1 << 1);
+ return 1;
+ }
+
+ if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
+ *ib_reg_mask |= (1 << 0);
+ return 0;
+ }
+
+ if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) {
+ *ib_reg_mask |= (1 << 2);
+ return 2;
+ }
+
+ return -EINVAL;
+}
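+
+/*
+ * Illustrative example: dma-ranges of 8 MB, 64 KB and 2 GB, parsed in
+ * that order, land in regions 1, 0 and 2: the 8 MB range takes
+ * region 1 (> 4 bytes, < 16 MB), the 64 KB range falls through to
+ * region 0 because region 1 is already taken, and the 2 GB range is
+ * too big for region 1, so it takes region 2.
+ */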
+
+static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port,
+ struct of_pci_range *range, u8 *ib_reg_mask)
+{
+ void __iomem *cfg_base = port->cfg_base;
+ struct device *dev = port->dev;
+ void __iomem *bar_addr;
+ u32 pim_reg;
+ u64 cpu_addr = range->cpu_addr;
+ u64 pci_addr = range->pci_addr;
+ u64 size = range->size;
+ u64 mask = ~(size - 1) | EN_REG;
+ u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
+ u32 bar_low;
+ int region;
+
+ region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
+ if (region < 0) {
+ dev_warn(dev, "invalid pcie dma-range config\n");
+ return;
+ }
+
+ if (range->flags & IORESOURCE_PREFETCH)
+ flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+
+ bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
+ switch (region) {
+ case 0:
+ xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
+ bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
+ writel(bar_low, bar_addr);
+ writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
+ pim_reg = PIM1_1L;
+ break;
+ case 1:
+ xgene_pcie_writel(port, IBAR2, bar_low);
+ xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
+ pim_reg = PIM2_1L;
+ break;
+ case 2:
+ xgene_pcie_writel(port, IBAR3L, bar_low);
+ xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
+ xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
+ xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
+ pim_reg = PIM3_1L;
+ break;
+ }
+
+ xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
+}
+
+static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie *port)
+{
+ struct device_node *np = port->node;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct device *dev = port->dev;
+ u8 ib_reg_mask = 0;
+
+ if (of_pci_dma_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+
+ /* Get the dma-ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ u64 end = range.cpu_addr + range.size - 1;
+
+ dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.flags, range.cpu_addr, end, range.pci_addr);
+ xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
+ }
+ return 0;
+}
+
+/* clear BAR configuration which was done by firmware */
+static void xgene_pcie_clear_config(struct xgene_pcie *port)
+{
+ int i;
+
+ for (i = PIM1_1L; i <= CFGCTL; i += 4)
+ xgene_pcie_writel(port, i, 0);
+}
+
+static int xgene_pcie_setup(struct xgene_pcie *port)
+{
+ struct device *dev = port->dev;
+ u32 val, lanes = 0, speed = 0;
+ int ret;
+
+ xgene_pcie_clear_config(port);
+
+ /* setup the vendor and device IDs correctly */
+ val = (XGENE_PCIE_DEVICEID << 16) | PCI_VENDOR_ID_AMCC;
+ xgene_pcie_writel(port, BRIDGE_CFG_0, val);
+
+ ret = xgene_pcie_map_ranges(port);
+ if (ret)
+ return ret;
+
+ ret = xgene_pcie_parse_map_dma_ranges(port);
+ if (ret)
+ return ret;
+
+ xgene_pcie_linkup(port, &lanes, &speed);
+ if (!port->link_up)
+ dev_info(dev, "(rc) link down\n");
+ else
+ dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
+ return 0;
+}
+
+static struct pci_ops xgene_pcie_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static int xgene_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct xgene_pcie *port;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENOMEM;
+
+ port = pci_host_bridge_priv(bridge);
+
+ port->node = of_node_get(dn);
+ port->dev = dev;
+
+ port->version = XGENE_PCIE_IP_VER_UNKN;
+ if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
+ port->version = XGENE_PCIE_IP_VER_1;
+
+ ret = xgene_pcie_map_reg(port, pdev);
+ if (ret)
+ return ret;
+
+ ret = xgene_pcie_init_port(port);
+ if (ret)
+ return ret;
+
+ ret = xgene_pcie_setup(port);
+ if (ret)
+ return ret;
+
+ bridge->sysdata = port;
+ bridge->ops = &xgene_pcie_ops;
+
+ return pci_host_probe(bridge);
+}
+
+static const struct of_device_id xgene_pcie_match_table[] = {
+ {.compatible = "apm,xgene-pcie",},
+ {},
+};
+
+static struct platform_driver xgene_pcie_driver = {
+ .driver = {
+ .name = "xgene-pcie",
+ .of_match_table = xgene_pcie_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xgene_pcie_probe,
+};
+builtin_platform_driver(xgene_pcie_driver);
+#endif
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
new file mode 100644
index 000000000..6ad542749
--- /dev/null
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Altera PCIe MSI support
+ *
+ * Author: Ley Foon Tan <lftan@altera.com>
+ *
+ * Copyright Altera Corporation (C) 2013-2015. All rights reserved
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MSI_STATUS 0x0
+#define MSI_ERROR 0x4
+#define MSI_INTMASK 0x8
+
+#define MAX_MSI_VECTORS 32
+
+struct altera_msi {
+ DECLARE_BITMAP(used, MAX_MSI_VECTORS);
+ struct mutex lock; /* protect "used" bitmap */
+ struct platform_device *pdev;
+ struct irq_domain *msi_domain;
+ struct irq_domain *inner_domain;
+ void __iomem *csr_base;
+ void __iomem *vector_base;
+ phys_addr_t vector_phy;
+ u32 num_of_vectors;
+ int irq;
+};
+
+static inline void msi_writel(struct altera_msi *msi, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, msi->csr_base + reg);
+}
+
+static inline u32 msi_readl(struct altera_msi *msi, const u32 reg)
+{
+ return readl_relaxed(msi->csr_base + reg);
+}
+
+static void altera_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_msi *msi;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+ msi = irq_desc_get_handler_data(desc);
+
+ while ((status = msi_readl(msi, MSI_STATUS)) != 0) {
+ for_each_set_bit(bit, &status, msi->num_of_vectors) {
+ /* Dummy read from vector to clear the interrupt */
+ readl_relaxed(msi->vector_base + (bit * sizeof(u32)));
+
+ ret = generic_handle_domain_irq(msi->inner_domain, bit);
+ if (ret)
+ dev_err_ratelimited(&msi->pdev->dev, "unexpected MSI\n");
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip altera_msi_irq_chip = {
+ .name = "Altera PCIe MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info altera_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &altera_msi_irq_chip,
+};
+
+static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct altera_msi *msi = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = msi->vector_phy + (data->hwirq * sizeof(u32));
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(&msi->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
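+
+/*
+ * The message simply encodes the vector index: for instance, hwirq 3
+ * targets vector_phy + 12 with data 3, and the dummy read of
+ * vector_base + 12 in altera_msi_isr() above is what clears it again.
+ */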
+
+static int altera_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip altera_msi_bottom_irq_chip = {
+ .name = "Altera MSI",
+ .irq_compose_msi_msg = altera_compose_msi_msg,
+ .irq_set_affinity = altera_msi_set_affinity,
+};
+
+static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct altera_msi *msi = domain->host_data;
+ unsigned long bit;
+ u32 mask;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&msi->lock);
+
+ bit = find_first_zero_bit(msi->used, msi->num_of_vectors);
+ if (bit >= msi->num_of_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->used);
+
+ mutex_unlock(&msi->lock);
+
+ irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+
+ mask = msi_readl(msi, MSI_INTMASK);
+ mask |= 1 << bit;
+ msi_writel(msi, mask, MSI_INTMASK);
+
+ return 0;
+}
+
+static void altera_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct altera_msi *msi = irq_data_get_irq_chip_data(d);
+ u32 mask;
+
+ mutex_lock(&msi->lock);
+
+ if (!test_bit(d->hwirq, msi->used)) {
+ dev_err(&msi->pdev->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ } else {
+ __clear_bit(d->hwirq, msi->used);
+ mask = msi_readl(msi, MSI_INTMASK);
+ mask &= ~(1 << d->hwirq);
+ msi_writel(msi, mask, MSI_INTMASK);
+ }
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = altera_irq_domain_alloc,
+ .free = altera_irq_domain_free,
+};
+
+static int altera_allocate_domains(struct altera_msi *msi)
+{
+ struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node);
+
+ msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, msi);
+ if (!msi->inner_domain) {
+ dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &altera_msi_domain_info, msi->inner_domain);
+ if (!msi->msi_domain) {
+ dev_err(&msi->pdev->dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void altera_free_domains(struct altera_msi *msi)
+{
+ irq_domain_remove(msi->msi_domain);
+ irq_domain_remove(msi->inner_domain);
+}
+
+static void altera_msi_remove(struct platform_device *pdev)
+{
+ struct altera_msi *msi = platform_get_drvdata(pdev);
+
+ msi_writel(msi, 0, MSI_INTMASK);
+ irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
+
+ altera_free_domains(msi);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static int altera_msi_probe(struct platform_device *pdev)
+{
+ struct altera_msi *msi;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ int ret;
+
+ msi = devm_kzalloc(&pdev->dev, sizeof(struct altera_msi),
+ GFP_KERNEL);
+ if (!msi)
+ return -ENOMEM;
+
+ mutex_init(&msi->lock);
+ msi->pdev = pdev;
+
+ msi->csr_base = devm_platform_ioremap_resource_byname(pdev, "csr");
+ if (IS_ERR(msi->csr_base)) {
+ dev_err(&pdev->dev, "failed to map csr memory\n");
+ return PTR_ERR(msi->csr_base);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "vector_slave");
+ msi->vector_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msi->vector_base))
+ return PTR_ERR(msi->vector_base);
+
+ msi->vector_phy = res->start;
+
+ if (of_property_read_u32(np, "num-vectors", &msi->num_of_vectors)) {
+ dev_err(&pdev->dev, "failed to parse the number of vectors\n");
+ return -EINVAL;
+ }
+
+ ret = altera_allocate_domains(msi);
+ if (ret)
+ return ret;
+
+ msi->irq = platform_get_irq(pdev, 0);
+ if (msi->irq < 0) {
+ ret = msi->irq;
+ goto err;
+ }
+
+ irq_set_chained_handler_and_data(msi->irq, altera_msi_isr, msi);
+ platform_set_drvdata(pdev, msi);
+
+ return 0;
+
+err:
+ altera_msi_remove(pdev);
+ return ret;
+}
+
+static const struct of_device_id altera_msi_of_match[] = {
+ { .compatible = "altr,msi-1.0", NULL },
+ { },
+};
+
+static struct platform_driver altera_msi_driver = {
+ .driver = {
+ .name = "altera-msi",
+ .of_match_table = altera_msi_of_match,
+ },
+ .probe = altera_msi_probe,
+ .remove_new = altera_msi_remove,
+};
+
+static int __init altera_msi_init(void)
+{
+ return platform_driver_register(&altera_msi_driver);
+}
+
+static void __exit altera_msi_exit(void)
+{
+ platform_driver_unregister(&altera_msi_driver);
+}
+
+subsys_initcall(altera_msi_init);
+MODULE_DEVICE_TABLE(of, altera_msi_of_match);
+module_exit(altera_msi_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
new file mode 100644
index 000000000..a9536dc4b
--- /dev/null
+++ b/drivers/pci/controller/pcie-altera.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright Altera Corporation (C) 2013-2015. All rights reserved
+ *
+ * Author: Ley Foon Tan <lftan@altera.com>
+ * Description: Altera PCIe host controller driver
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "../pci.h"
+
+#define RP_TX_REG0 0x2000
+#define RP_TX_REG1 0x2004
+#define RP_TX_CNTRL 0x2008
+#define RP_TX_EOP 0x2
+#define RP_TX_SOP 0x1
+#define RP_RXCPL_STATUS 0x2010
+#define RP_RXCPL_EOP 0x2
+#define RP_RXCPL_SOP 0x1
+#define RP_RXCPL_REG0 0x2014
+#define RP_RXCPL_REG1 0x2018
+#define P2A_INT_STATUS 0x3060
+#define P2A_INT_STS_ALL 0xf
+#define P2A_INT_ENABLE 0x3070
+#define P2A_INT_ENA_ALL 0xf
+#define RP_LTSSM 0x3c64
+#define RP_LTSSM_MASK 0x1f
+#define LTSSM_L0 0xf
+
+#define S10_RP_TX_CNTRL 0x2004
+#define S10_RP_RXCPL_REG 0x2008
+#define S10_RP_RXCPL_STATUS 0x200C
+#define S10_RP_CFG_ADDR(pcie, reg) \
+ (((pcie)->hip_base) + (reg) + (1 << 20))
+#define S10_RP_SECONDARY(pcie) \
+ readb(S10_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
+
+/* TLP configuration type 0 and 1 */
+#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
+#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */
+#define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */
+#define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */
+#define TLP_PAYLOAD_SIZE 0x01
+#define TLP_READ_TAG 0x1d
+#define TLP_WRITE_TAG 0x10
+#define RP_DEVFN 0
+#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
+#define TLP_CFG_DW0(pcie, cfg) \
+ (((cfg) << 24) | \
+ TLP_PAYLOAD_SIZE)
+#define TLP_CFG_DW1(pcie, tag, be) \
+ (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
+#define TLP_CFG_DW2(bus, devfn, offset) \
+ (((bus) << 24) | ((devfn) << 16) | (offset))
+#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
+#define TLP_BYTE_COUNT(s) (((s) >> 0) & 0xfff)
+#define TLP_HDR_SIZE 3
+#define TLP_LOOP 500
+
+#define LINK_UP_TIMEOUT HZ
+#define LINK_RETRAIN_TIMEOUT HZ
+
+#define DWORD_MASK 3
+
+#define S10_TLP_FMTTYPE_CFGRD0 0x05
+#define S10_TLP_FMTTYPE_CFGRD1 0x04
+#define S10_TLP_FMTTYPE_CFGWR0 0x45
+#define S10_TLP_FMTTYPE_CFGWR1 0x44
+
+enum altera_pcie_version {
+ ALTERA_PCIE_V1 = 0,
+ ALTERA_PCIE_V2,
+};
+
+struct altera_pcie {
+ struct platform_device *pdev;
+ void __iomem *cra_base;
+ void __iomem *hip_base;
+ int irq;
+ u8 root_bus_nr;
+ struct irq_domain *irq_domain;
+ struct resource bus_range;
+ const struct altera_pcie_data *pcie_data;
+};
+
+struct altera_pcie_ops {
+ int (*tlp_read_pkt)(struct altera_pcie *pcie, u32 *value);
+ void (*tlp_write_pkt)(struct altera_pcie *pcie, u32 *headers,
+ u32 data, bool align);
+ bool (*get_link_status)(struct altera_pcie *pcie);
+ int (*rp_read_cfg)(struct altera_pcie *pcie, int where,
+ int size, u32 *value);
+ int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
+ int where, int size, u32 value);
+};
+
+struct altera_pcie_data {
+ const struct altera_pcie_ops *ops;
+ enum altera_pcie_version version;
+ u32 cap_offset; /* PCIe capability structure register offset */
+ u32 cfgrd0;
+ u32 cfgrd1;
+ u32 cfgwr0;
+ u32 cfgwr1;
+};
+
+struct tlp_rp_regpair_t {
+ u32 ctrl;
+ u32 reg0;
+ u32 reg1;
+};
+
+static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->cra_base + reg);
+}
+
+static bool altera_pcie_link_up(struct altera_pcie *pcie)
+{
+ return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
+}
+
+static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
+{
+ void __iomem *addr = S10_RP_CFG_ADDR(pcie,
+ pcie->pcie_data->cap_offset +
+ PCI_EXP_LNKSTA);
+
+ return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
+}
+
+/*
+ * The Altera PCIe port uses BAR0 of the RC's configuration space for the
+ * translation from the PCI bus to the native bus. The entire DDR region is
+ * mapped into PCIe space using these registers, so that it can be reached
+ * by DMA from EP devices. BAR0 is also used to access the MSI vectors when
+ * an MSI/MSI-X interrupt is received from EP devices, which eventually
+ * triggers an interrupt to the GIC. BAR0 of the bridge must be hidden
+ * during enumeration to avoid sizing and resource allocation by the PCI core.
+ */
+static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ if (pci_is_root_bus(bus) && (devfn == 0) &&
+ (offset == PCI_BASE_ADDRESS_0))
+ return true;
+
+ return false;
+}
+
+static void tlp_write_tx(struct altera_pcie *pcie,
+ struct tlp_rp_regpair_t *tlp_rp_regdata)
+{
+ cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0);
+ cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1);
+ cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
+}
+
+static void s10_tlp_write_tx(struct altera_pcie *pcie, u32 reg0, u32 ctrl)
+{
+ cra_writel(pcie, reg0, RP_TX_REG0);
+ cra_writel(pcie, ctrl, S10_RP_TX_CNTRL);
+}
+
+static bool altera_pcie_valid_device(struct altera_pcie *pcie,
+ struct pci_bus *bus, int dev)
+{
+ /* If there is no link, then there is no device */
+ if (bus->number != pcie->root_bus_nr) {
+ if (!pcie->pcie_data->ops->get_link_status(pcie))
+ return false;
+ }
+
+ /* access only one slot on each root port */
+ if (bus->number == pcie->root_bus_nr && dev > 0)
+ return false;
+
+ return true;
+}
+
+static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
+{
+ int i;
+ bool sop = false;
+ u32 ctrl;
+ u32 reg0, reg1;
+ u32 comp_status = 1;
+
+ /*
+	 * A minimum of 2 loops is needed to read the TLP headers and 1 more
+	 * loop to read the data payload.
+ */
+ for (i = 0; i < TLP_LOOP; i++) {
+ ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
+ if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
+ reg0 = cra_readl(pcie, RP_RXCPL_REG0);
+ reg1 = cra_readl(pcie, RP_RXCPL_REG1);
+
+ if (ctrl & RP_RXCPL_SOP) {
+ sop = true;
+ comp_status = TLP_COMP_STATUS(reg1);
+ }
+
+ if (ctrl & RP_RXCPL_EOP) {
+ if (comp_status)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (value)
+ *value = reg0;
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+ }
+ udelay(5);
+ }
+
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int s10_tlp_read_packet(struct altera_pcie *pcie, u32 *value)
+{
+ u32 ctrl;
+ u32 comp_status;
+ u32 dw[4];
+ u32 count;
+ struct device *dev = &pcie->pdev->dev;
+
+ for (count = 0; count < TLP_LOOP; count++) {
+ ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS);
+ if (ctrl & RP_RXCPL_SOP) {
+ /* Read first DW */
+ dw[0] = cra_readl(pcie, S10_RP_RXCPL_REG);
+ break;
+ }
+
+ udelay(5);
+ }
+
+ /* SOP detection failed, return error */
+ if (count == TLP_LOOP)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ count = 1;
+
+ /* Poll for EOP */
+ while (count < ARRAY_SIZE(dw)) {
+ ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS);
+ dw[count++] = cra_readl(pcie, S10_RP_RXCPL_REG);
+ if (ctrl & RP_RXCPL_EOP) {
+ comp_status = TLP_COMP_STATUS(dw[1]);
+ if (comp_status)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (value && TLP_BYTE_COUNT(dw[1]) == sizeof(u32) &&
+ count == 4)
+ *value = dw[3];
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+ }
+
+ dev_warn(dev, "Malformed TLP packet\n");
+
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
+ u32 data, bool align)
+{
+ struct tlp_rp_regpair_t tlp_rp_regdata;
+
+ tlp_rp_regdata.reg0 = headers[0];
+ tlp_rp_regdata.reg1 = headers[1];
+ tlp_rp_regdata.ctrl = RP_TX_SOP;
+ tlp_write_tx(pcie, &tlp_rp_regdata);
+
+ if (align) {
+ tlp_rp_regdata.reg0 = headers[2];
+ tlp_rp_regdata.reg1 = 0;
+ tlp_rp_regdata.ctrl = 0;
+ tlp_write_tx(pcie, &tlp_rp_regdata);
+
+ tlp_rp_regdata.reg0 = data;
+ tlp_rp_regdata.reg1 = 0;
+ } else {
+ tlp_rp_regdata.reg0 = headers[2];
+ tlp_rp_regdata.reg1 = data;
+ }
+
+ tlp_rp_regdata.ctrl = RP_TX_EOP;
+ tlp_write_tx(pcie, &tlp_rp_regdata);
+}
+
+static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
+ u32 data, bool dummy)
+{
+ s10_tlp_write_tx(pcie, headers[0], RP_TX_SOP);
+ s10_tlp_write_tx(pcie, headers[1], 0);
+ s10_tlp_write_tx(pcie, headers[2], 0);
+ s10_tlp_write_tx(pcie, data, RP_TX_EOP);
+}
+
+static void get_tlp_header(struct altera_pcie *pcie, u8 bus, u32 devfn,
+ int where, u8 byte_en, bool read, u32 *headers)
+{
+ u8 cfg;
+ u8 cfg0 = read ? pcie->pcie_data->cfgrd0 : pcie->pcie_data->cfgwr0;
+ u8 cfg1 = read ? pcie->pcie_data->cfgrd1 : pcie->pcie_data->cfgwr1;
+ u8 tag = read ? TLP_READ_TAG : TLP_WRITE_TAG;
+
+ if (pcie->pcie_data->version == ALTERA_PCIE_V1)
+ cfg = (bus == pcie->root_bus_nr) ? cfg0 : cfg1;
+ else
+ cfg = (bus > S10_RP_SECONDARY(pcie)) ? cfg0 : cfg1;
+
+ headers[0] = TLP_CFG_DW0(pcie, cfg);
+ headers[1] = TLP_CFG_DW1(pcie, tag, byte_en);
+ headers[2] = TLP_CFG_DW2(bus, devfn, where);
+}
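+
+/*
+ * Illustrative worked example (V1 encoding, root_bus_nr = 0): a config
+ * read of offset 0x10 on bus 1, devfn 0 with byte_en 0xf yields
+ * headers[0] = (TLP_FMTTYPE_CFGRD1 << 24) | TLP_PAYLOAD_SIZE = 0x05000001,
+ * headers[1] = (TLP_READ_TAG << 8) | 0xf = 0x00001d0f and
+ * headers[2] = (1 << 24) | 0x10 = 0x01000010.
+ */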
+
+static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
+ int where, u8 byte_en, u32 *value)
+{
+ u32 headers[TLP_HDR_SIZE];
+
+ get_tlp_header(pcie, bus, devfn, where, byte_en, true,
+ headers);
+
+ pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false);
+
+ return pcie->pcie_data->ops->tlp_read_pkt(pcie, value);
+}
+
+static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
+ int where, u8 byte_en, u32 value)
+{
+ u32 headers[TLP_HDR_SIZE];
+ int ret;
+
+ get_tlp_header(pcie, bus, devfn, where, byte_en, false,
+ headers);
+
+ /* check alignment to Qword */
+ if ((where & 0x7) == 0)
+ pcie->pcie_data->ops->tlp_write_pkt(pcie, headers,
+ value, true);
+ else
+ pcie->pcie_data->ops->tlp_write_pkt(pcie, headers,
+ value, false);
+
+ ret = pcie->pcie_data->ops->tlp_read_pkt(pcie, NULL);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on root port
+ * and update local copy of root bus number accordingly.
+ */
+ if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS))
+ pcie->root_bus_nr = (u8)(value);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int s10_rp_read_cfg(struct altera_pcie *pcie, int where,
+ int size, u32 *value)
+{
+ void __iomem *addr = S10_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ *value = readb(addr);
+ break;
+ case 2:
+ *value = readw(addr);
+ break;
+ default:
+ *value = readl(addr);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
+ int where, int size, u32 value)
+{
+ void __iomem *addr = S10_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ writeb(value, addr);
+ break;
+ case 2:
+ writew(value, addr);
+ break;
+ default:
+ writel(value, addr);
+ break;
+ }
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on root port
+ * and update local copy of root bus number accordingly.
+ */
+ if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
+ pcie->root_bus_nr = value & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size,
+ u32 *value)
+{
+ int ret;
+ u32 data;
+ u8 byte_en;
+
+ if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_read_cfg)
+ return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
+ size, value);
+
+ switch (size) {
+ case 1:
+ byte_en = 1 << (where & 3);
+ break;
+ case 2:
+ byte_en = 3 << (where & 3);
+ break;
+ default:
+ byte_en = 0xf;
+ break;
+ }
+
+ ret = tlp_cfg_dword_read(pcie, busno, devfn,
+ (where & ~DWORD_MASK), byte_en, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ switch (size) {
+ case 1:
+ *value = (data >> (8 * (where & 0x3))) & 0xff;
+ break;
+ case 2:
+ *value = (data >> (8 * (where & 0x2))) & 0xffff;
+ break;
+ default:
+ *value = data;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
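+
+/*
+ * Illustrative example: a 16-bit read at where = 0x06 uses
+ * byte_en = 3 << 2 = 0xc, issues the dword read at offset 0x04 and
+ * returns (data >> 16) & 0xffff.
+ */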
+
+static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size,
+ u32 value)
+{
+ u32 data32;
+ u32 shift = 8 * (where & 3);
+ u8 byte_en;
+
+ if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_write_cfg)
+ return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
+ where, size, value);
+
+ switch (size) {
+ case 1:
+ data32 = (value & 0xff) << shift;
+ byte_en = 1 << (where & 3);
+ break;
+ case 2:
+ data32 = (value & 0xffff) << shift;
+ byte_en = 3 << (where & 3);
+ break;
+ default:
+ data32 = value;
+ byte_en = 0xf;
+ break;
+ }
+
+ return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK),
+ byte_en, data32);
+}
+
+static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
+ value);
+}
+
+static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
+ value);
+}
+
+static struct pci_ops altera_pcie_ops = {
+ .read = altera_pcie_cfg_read,
+ .write = altera_pcie_cfg_write,
+};
+
+static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int offset, u16 *value)
+{
+ u32 data;
+ int ret;
+
+ ret = _altera_pcie_cfg_read(pcie, busno, devfn,
+ pcie->pcie_data->cap_offset + offset,
+ sizeof(*value),
+ &data);
+ *value = data;
+ return ret;
+}
+
+static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int offset, u16 value)
+{
+ return _altera_pcie_cfg_write(pcie, busno, devfn,
+ pcie->pcie_data->cap_offset + offset,
+ sizeof(value),
+ value);
+}
+
+static void altera_wait_link_retrain(struct altera_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ u16 reg16;
+ unsigned long start_jiffies;
+
+ /* Wait for link training end. */
+ start_jiffies = jiffies;
+ for (;;) {
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+
+ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
+ dev_err(dev, "link retrain timeout\n");
+ break;
+ }
+ udelay(100);
+ }
+
+	/* Wait for the link to come up */
+ start_jiffies = jiffies;
+ for (;;) {
+ if (pcie->pcie_data->ops->get_link_status(pcie))
+ break;
+
+ if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
+ dev_err(dev, "link up timeout\n");
+ break;
+ }
+ udelay(100);
+ }
+}
+
+static void altera_pcie_retrain(struct altera_pcie *pcie)
+{
+ u16 linkcap, linkstat, linkctl;
+
+ if (!pcie->pcie_data->ops->get_link_status(pcie))
+ return;
+
+ /*
+	 * Set the retrain bit if the PCIe root port supports speeds greater
+	 * than 2.5 GT/s but the current link speed is 2.5 GT/s.
+ */
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP,
+ &linkcap);
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return;
+
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA,
+ &linkstat);
+ if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKCTL, &linkctl);
+ linkctl |= PCI_EXP_LNKCTL_RL;
+ altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKCTL, linkctl);
+
+ altera_wait_link_retrain(pcie);
+ }
+}
+
+static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = altera_pcie_intx_map,
+ .xlate = pci_irqd_intx_xlate,
+};
+
+static void altera_pcie_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_pcie *pcie;
+ struct device *dev;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+ dev = &pcie->pdev->dev;
+
+ while ((status = cra_readl(pcie, P2A_INT_STATUS)
+ & P2A_INT_STS_ALL) != 0) {
+ for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+ /* clear interrupts */
+ cra_writel(pcie, 1 << bit, P2A_INT_STATUS);
+
+ ret = generic_handle_domain_irq(pcie->irq_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+
+ /* Setup INTx */
+ pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
+ if (!pcie->irq_domain) {
+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void altera_pcie_irq_teardown(struct altera_pcie *pcie)
+{
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ irq_domain_remove(pcie->irq_domain);
+ irq_dispose_mapping(pcie->irq);
+}
+
+static int altera_pcie_parse_dt(struct altera_pcie *pcie)
+{
+ struct platform_device *pdev = pcie->pdev;
+
+ pcie->cra_base = devm_platform_ioremap_resource_byname(pdev, "Cra");
+ if (IS_ERR(pcie->cra_base))
+ return PTR_ERR(pcie->cra_base);
+
+ if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
+ pcie->hip_base =
+ devm_platform_ioremap_resource_byname(pdev, "Hip");
+ if (IS_ERR(pcie->hip_base))
+ return PTR_ERR(pcie->hip_base);
+ }
+
+ /* setup IRQ */
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq < 0)
+ return pcie->irq;
+
+ irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
+ return 0;
+}
+
+static void altera_pcie_host_init(struct altera_pcie *pcie)
+{
+ altera_pcie_retrain(pcie);
+}
+
+static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
+ .tlp_read_pkt = tlp_read_packet,
+ .tlp_write_pkt = tlp_write_packet,
+ .get_link_status = altera_pcie_link_up,
+};
+
+static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
+ .tlp_read_pkt = s10_tlp_read_packet,
+ .tlp_write_pkt = s10_tlp_write_packet,
+ .get_link_status = s10_altera_pcie_link_up,
+ .rp_read_cfg = s10_rp_read_cfg,
+ .rp_write_cfg = s10_rp_write_cfg,
+};
+
+static const struct altera_pcie_data altera_pcie_1_0_data = {
+ .ops = &altera_pcie_ops_1_0,
+ .cap_offset = 0x80,
+ .version = ALTERA_PCIE_V1,
+ .cfgrd0 = TLP_FMTTYPE_CFGRD0,
+ .cfgrd1 = TLP_FMTTYPE_CFGRD1,
+ .cfgwr0 = TLP_FMTTYPE_CFGWR0,
+ .cfgwr1 = TLP_FMTTYPE_CFGWR1,
+};
+
+static const struct altera_pcie_data altera_pcie_2_0_data = {
+ .ops = &altera_pcie_ops_2_0,
+ .version = ALTERA_PCIE_V2,
+ .cap_offset = 0x70,
+ .cfgrd0 = S10_TLP_FMTTYPE_CFGRD0,
+ .cfgrd1 = S10_TLP_FMTTYPE_CFGRD1,
+ .cfgwr0 = S10_TLP_FMTTYPE_CFGWR0,
+ .cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
+};
+
+static const struct of_device_id altera_pcie_of_match[] = {
+ {.compatible = "altr,pcie-root-port-1.0",
+ .data = &altera_pcie_1_0_data },
+ {.compatible = "altr,pcie-root-port-2.0",
+ .data = &altera_pcie_2_0_data },
+ {},
+};
+
+static int altera_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct altera_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ int ret;
+ const struct altera_pcie_data *data;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ pcie->pcie_data = data;
+
+ ret = altera_pcie_parse_dt(pcie);
+ if (ret) {
+ dev_err(dev, "Parsing DT failed\n");
+ return ret;
+ }
+
+ ret = altera_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return ret;
+ }
+
+ /* clear all interrupts */
+ cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
+ /* enable all interrupts */
+ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
+
+ bridge->sysdata = pcie;
+ bridge->busnr = pcie->root_bus_nr;
+ bridge->ops = &altera_pcie_ops;
+
+ return pci_host_probe(bridge);
+}
+
+static void altera_pcie_remove(struct platform_device *pdev)
+{
+ struct altera_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ altera_pcie_irq_teardown(pcie);
+}
+
+static struct platform_driver altera_pcie_driver = {
+ .probe = altera_pcie_probe,
+ .remove_new = altera_pcie_remove,
+ .driver = {
+ .name = "altera-pcie",
+ .of_match_table = altera_pcie_of_match,
+ },
+};
+
+MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
+module_platform_driver(altera_pcie_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
new file mode 100644
index 000000000..f7a248393
--- /dev/null
+++ b/drivers/pci/controller/pcie-apple.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host bridge driver for Apple system-on-chips.
+ *
+ * The HW is ECAM compliant, so once the controller is initialized,
+ * the driver mostly deals with MSI mapping and handling of per-port
+ * interrupts (INTx, management and error signals).
+ *
+ * Initialization requires enabling power and clocks, along with a
+ * number of register pokes.
+ *
+ * Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
+ * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2021 Corellium LLC
+ * Copyright (C) 2021 Mark Kettenis <kettenis@openbsd.org>
+ *
+ * Author: Alyssa Rosenzweig <alyssa@rosenzweig.io>
+ * Author: Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/notifier.h>
+#include <linux/of_irq.h>
+#include <linux/pci-ecam.h>
+
+#define CORE_RC_PHYIF_CTL 0x00024
+#define CORE_RC_PHYIF_CTL_RUN BIT(0)
+#define CORE_RC_PHYIF_STAT 0x00028
+#define CORE_RC_PHYIF_STAT_REFCLK BIT(4)
+#define CORE_RC_CTL 0x00050
+#define CORE_RC_CTL_RUN BIT(0)
+#define CORE_RC_STAT 0x00058
+#define CORE_RC_STAT_READY BIT(0)
+#define CORE_FABRIC_STAT 0x04000
+#define CORE_FABRIC_STAT_MASK 0x001F001F
+#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
+#define CORE_LANE_CFG_REFCLK0REQ BIT(0)
+#define CORE_LANE_CFG_REFCLK1REQ BIT(1)
+#define CORE_LANE_CFG_REFCLK0ACK BIT(2)
+#define CORE_LANE_CFG_REFCLK1ACK BIT(3)
+#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
+#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
+#define CORE_LANE_CTL_CFGACC BIT(15)
+
+#define PORT_LTSSMCTL 0x00080
+#define PORT_LTSSMCTL_START BIT(0)
+#define PORT_INTSTAT 0x00100
+#define PORT_INT_TUNNEL_ERR 31
+#define PORT_INT_CPL_TIMEOUT 23
+#define PORT_INT_RID2SID_MAPERR 22
+#define PORT_INT_CPL_ABORT 21
+#define PORT_INT_MSI_BAD_DATA 19
+#define PORT_INT_MSI_ERR 18
+#define PORT_INT_REQADDR_GT32 17
+#define PORT_INT_AF_TIMEOUT 15
+#define PORT_INT_LINK_DOWN 14
+#define PORT_INT_LINK_UP 12
+#define PORT_INT_LINK_BWMGMT 11
+#define PORT_INT_AER_MASK (15 << 4)
+#define PORT_INT_PORT_ERR 4
+#define PORT_INT_INTx(i) (i)
+#define PORT_INT_INTx_MASK 15
+#define PORT_INTMSK 0x00104
+#define PORT_INTMSKSET 0x00108
+#define PORT_INTMSKCLR 0x0010c
+#define PORT_MSICFG 0x00124
+#define PORT_MSICFG_EN BIT(0)
+#define PORT_MSICFG_L2MSINUM_SHIFT 4
+#define PORT_MSIBASE 0x00128
+#define PORT_MSIBASE_1_SHIFT 16
+#define PORT_MSIADDR 0x00168
+#define PORT_LINKSTS 0x00208
+#define PORT_LINKSTS_UP BIT(0)
+#define PORT_LINKSTS_BUSY BIT(2)
+#define PORT_LINKCMDSTS 0x00210
+#define PORT_OUTS_NPREQS 0x00284
+#define PORT_OUTS_NPREQS_REQ BIT(24)
+#define PORT_OUTS_NPREQS_CPL BIT(16)
+#define PORT_RXWR_FIFO 0x00288
+#define PORT_RXWR_FIFO_HDR GENMASK(15, 10)
+#define PORT_RXWR_FIFO_DATA GENMASK(9, 0)
+#define PORT_RXRD_FIFO 0x0028C
+#define PORT_RXRD_FIFO_REQ GENMASK(6, 0)
+#define PORT_OUTS_CPLS 0x00290
+#define PORT_OUTS_CPLS_SHRD GENMASK(14, 8)
+#define PORT_OUTS_CPLS_WAIT GENMASK(6, 0)
+#define PORT_APPCLK 0x00800
+#define PORT_APPCLK_EN BIT(0)
+#define PORT_APPCLK_CGDIS BIT(8)
+#define PORT_STATUS 0x00804
+#define PORT_STATUS_READY BIT(0)
+#define PORT_REFCLK 0x00810
+#define PORT_REFCLK_EN BIT(0)
+#define PORT_REFCLK_CGDIS BIT(8)
+#define PORT_PERST 0x00814
+#define PORT_PERST_OFF BIT(0)
+#define PORT_RID2SID(i16) (0x00828 + 4 * (i16))
+#define PORT_RID2SID_VALID BIT(31)
+#define PORT_RID2SID_SID_SHIFT 16
+#define PORT_RID2SID_BUS_SHIFT 8
+#define PORT_RID2SID_DEV_SHIFT 3
+#define PORT_RID2SID_FUNC_SHIFT 0
+#define PORT_OUTS_PREQS_HDR 0x00980
+#define PORT_OUTS_PREQS_HDR_MASK GENMASK(9, 0)
+#define PORT_OUTS_PREQS_DATA 0x00984
+#define PORT_OUTS_PREQS_DATA_MASK GENMASK(15, 0)
+#define PORT_TUNCTRL 0x00988
+#define PORT_TUNCTRL_PERST_ON BIT(0)
+#define PORT_TUNCTRL_PERST_ACK_REQ BIT(1)
+#define PORT_TUNSTAT 0x0098c
+#define PORT_TUNSTAT_PERST_ON BIT(0)
+#define PORT_TUNSTAT_PERST_ACK_PEND BIT(1)
+#define PORT_PREFMEM_ENABLE 0x00994
+
+#define MAX_RID2SID 64
+
+/*
+ * The doorbell address is set to 0xfffff000, which by convention
+ * matches what MacOS does. Any other address in the bottom 4GB would
+ * work too, as the base register is only 32bit.
+ * However, it has to be excluded from the IOVA range, and the DART
+ * driver has to know about it.
+ */
+#define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
+
+struct apple_pcie {
+ struct mutex lock;
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *domain;
+ unsigned long *bitmap;
+ struct list_head ports;
+ struct completion event;
+ struct irq_fwspec fwspec;
+ u32 nvecs;
+};
+
+struct apple_pcie_port {
+ struct apple_pcie *pcie;
+ struct device_node *np;
+ void __iomem *base;
+ struct irq_domain *domain;
+ struct list_head entry;
+ DECLARE_BITMAP(sid_map, MAX_RID2SID);
+ int sid_map_sz;
+ int idx;
+};
+
+static void rmw_set(u32 set, void __iomem *addr)
+{
+ writel_relaxed(readl_relaxed(addr) | set, addr);
+}
+
+static void rmw_clear(u32 clr, void __iomem *addr)
+{
+ writel_relaxed(readl_relaxed(addr) & ~clr, addr);
+}
+
+static void apple_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void apple_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip apple_msi_top_chip = {
+ .name = "PCIe MSI",
+ .irq_mask = apple_msi_top_irq_mask,
+ .irq_unmask = apple_msi_top_irq_unmask,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+};
+
+static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ msg->address_hi = upper_32_bits(DOORBELL_ADDR);
+ msg->address_lo = lower_32_bits(DOORBELL_ADDR);
+ msg->data = data->hwirq;
+}
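+
+/*
+ * Editor's worked example: with the conventional doorbell address of
+ * 0xfffff000 noted above, an endpoint assigned hwirq 5 ends up with
+ *
+ *	msg->address_hi = 0x00000000;
+ *	msg->address_lo = 0xfffff000;
+ *	msg->data       = 5;
+ *
+ * i.e. the device signals MSI 5 by writing the value 5 to 0xfffff000.
+ */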
+
+static struct irq_chip apple_msi_bottom_chip = {
+ .name = "MSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_compose_msi_msg = apple_msi_compose_msg,
+};
+
+static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct apple_pcie *pcie = domain->host_data;
+ struct irq_fwspec fwspec = pcie->fwspec;
+ unsigned int i;
+ int ret, hwirq;
+
+ mutex_lock(&pcie->lock);
+
+ hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs,
+ order_base_2(nr_irqs));
+
+ mutex_unlock(&pcie->lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ fwspec.param[fwspec.param_count - 2] += hwirq;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &apple_msi_bottom_chip,
+ domain->host_data);
+ }
+
+ return 0;
+}
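+
+/*
+ * Editor's note (assumption about the parent fwspec layout, for
+ * illustration only): with a three-cell parent interrupt specifier of
+ * the form <type irqnr flags>, param_count - 2 indexes the irqnr
+ * cell, so the base interrupt taken from "msi-ranges" is simply
+ * offset by the allocated hwirq; e.g. a hypothetical parent base of
+ * 704 and hwirq 3 request parent interrupt 707.
+ */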
+
+static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct apple_pcie *pcie = domain->host_data;
+
+ mutex_lock(&pcie->lock);
+
+ bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs));
+
+ mutex_unlock(&pcie->lock);
+}
+
+static const struct irq_domain_ops apple_msi_domain_ops = {
+ .alloc = apple_msi_domain_alloc,
+ .free = apple_msi_domain_free,
+};
+
+static struct msi_domain_info apple_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .chip = &apple_msi_top_chip,
+};
+
+static void apple_port_irq_mask(struct irq_data *data)
+{
+ struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
+
+ writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
+}
+
+static void apple_port_irq_unmask(struct irq_data *data)
+{
+ struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
+
+ writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
+}
+
+static bool hwirq_is_intx(unsigned int hwirq)
+{
+ return BIT(hwirq) & PORT_INT_INTx_MASK;
+}
+
+static void apple_port_irq_ack(struct irq_data *data)
+{
+ struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
+
+ if (!hwirq_is_intx(data->hwirq))
+ writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT);
+}
+
+static int apple_port_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ /*
+ * It doesn't seem that there is any way to configure the
+ * trigger, so assume INTx interrupts have to be level (as per
+ * the spec) and the rest are edge (which looks likely).
+ */
+ if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK))
+ return -EINVAL;
+
+ irqd_set_trigger_type(data, type);
+ return 0;
+}
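+
+/*
+ * Editor's note: the XOR check above accepts exactly these
+ * combinations:
+ *
+ *	hwirq		requested trigger	result
+ *	INTx (0-3)	level			0
+ *	INTx (0-3)	edge			-EINVAL
+ *	other		edge			0
+ *	other		level			-EINVAL
+ */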
+
+static struct irq_chip apple_port_irqchip = {
+ .name = "PCIe",
+ .irq_ack = apple_port_irq_ack,
+ .irq_mask = apple_port_irq_mask,
+ .irq_unmask = apple_port_irq_unmask,
+ .irq_set_type = apple_port_irq_set_type,
+};
+
+static int apple_port_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct apple_pcie_port *port = domain->host_data;
+ struct irq_fwspec *fwspec = args;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_flow_handler_t flow = handle_edge_irq;
+ unsigned int type = IRQ_TYPE_EDGE_RISING;
+
+ if (hwirq_is_intx(fwspec->param[0] + i)) {
+ flow = handle_level_irq;
+ type = IRQ_TYPE_LEVEL_HIGH;
+ }
+
+ irq_domain_set_info(domain, virq + i, fwspec->param[0] + i,
+ &apple_port_irqchip, port, flow,
+ NULL, NULL);
+
+ irq_set_irq_type(virq + i, type);
+ }
+
+ return 0;
+}
+
+static void apple_port_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops apple_port_irq_domain_ops = {
+ .translate = irq_domain_translate_onecell,
+ .alloc = apple_port_irq_domain_alloc,
+ .free = apple_port_irq_domain_free,
+};
+
+static void apple_port_irq_handler(struct irq_desc *desc)
+{
+ struct apple_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long stat;
+ int i;
+
+ chained_irq_enter(chip, desc);
+
+ stat = readl_relaxed(port->base + PORT_INTSTAT);
+
+ for_each_set_bit(i, &stat, 32)
+ generic_handle_domain_irq(port->domain, i);
+
+ chained_irq_exit(chip, desc);
+}
+
+static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
+{
+ struct fwnode_handle *fwnode = &port->np->fwnode;
+ unsigned int irq;
+
+ /* FIXME: consider moving each interrupt under each port */
+ irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
+ port->idx);
+ if (!irq)
+ return -ENXIO;
+
+ port->domain = irq_domain_create_linear(fwnode, 32,
+ &apple_port_irq_domain_ops,
+ port);
+ if (!port->domain)
+ return -ENOMEM;
+
+ /* Disable all interrupts */
+ writel_relaxed(~0, port->base + PORT_INTMSKSET);
+ writel_relaxed(~0, port->base + PORT_INTSTAT);
+
+ irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);
+
+ /* Configure MSI base address */
+ BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
+ writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);
+
+ /* Enable MSIs, shared between all ports */
+ writel_relaxed(0, port->base + PORT_MSIBASE);
+ writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
+ PORT_MSICFG_EN, port->base + PORT_MSICFG);
+
+ return 0;
+}
+
+static irqreturn_t apple_pcie_port_irq(int irq, void *data)
+{
+ struct apple_pcie_port *port = data;
+ unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq;
+
+ switch (hwirq) {
+ case PORT_INT_LINK_UP:
+ dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n",
+ port->np);
+ complete_all(&port->pcie->event);
+ break;
+ case PORT_INT_LINK_DOWN:
+ dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n",
+ port->np);
+ break;
+ default:
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int apple_pcie_port_register_irqs(struct apple_pcie_port *port)
+{
+ static struct {
+ unsigned int hwirq;
+ const char *name;
+ } port_irqs[] = {
+ { PORT_INT_LINK_UP, "Link up", },
+ { PORT_INT_LINK_DOWN, "Link down", },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_irqs); i++) {
+ struct irq_fwspec fwspec = {
+ .fwnode = &port->np->fwnode,
+ .param_count = 1,
+ .param = {
+ [0] = port_irqs[i].hwirq,
+ },
+ };
+ unsigned int irq;
+ int ret;
+
+ irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE,
+ &fwspec);
+ if (WARN_ON(!irq))
+ continue;
+
+ ret = request_irq(irq, apple_pcie_port_irq, 0,
+ port_irqs[i].name, port);
+ WARN_ON(ret);
+ }
+
+ return 0;
+}
+
+static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
+ struct apple_pcie_port *port)
+{
+ u32 stat;
+ int res;
+
+ res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
+ stat & CORE_RC_PHYIF_STAT_REFCLK,
+ 100, 50000);
+ if (res < 0)
+ return res;
+
+ rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
+ rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));
+
+ res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
+ stat, stat & CORE_LANE_CFG_REFCLK0ACK,
+ 100, 50000);
+ if (res < 0)
+ return res;
+
+ rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
+ res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
+ stat, stat & CORE_LANE_CFG_REFCLK1ACK,
+ 100, 50000);
+
+ if (res < 0)
+ return res;
+
+ rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
+
+ rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
+ rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);
+
+ return 0;
+}
+
+static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
+ int idx, u32 val)
+{
+ writel_relaxed(val, port->base + PORT_RID2SID(idx));
+ /* Read back to ensure completion of the write */
+ return readl_relaxed(port->base + PORT_RID2SID(idx));
+}
+
+static int apple_pcie_setup_port(struct apple_pcie *pcie,
+ struct device_node *np)
+{
+ struct platform_device *platform = to_platform_device(pcie->dev);
+ struct apple_pcie_port *port;
+ struct gpio_desc *reset;
+ u32 stat, idx;
+ int ret, i;
+
+ reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset",
+ GPIOD_OUT_LOW, "PERST#");
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_index(np, "reg", 0, &idx);
+ if (ret)
+ return ret;
+
+ /* Use the first reg entry to work out the port index */
+ port->idx = idx >> 11;
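+ /*
+ * Editor's note: the first "reg" cell follows the standard PCI
+ * device-tree encoding (bus in bits [23:16], device in [15:11],
+ * function in [10:8]); with bus 0 and function 0, the shift above
+ * extracts the device number, which matches the root-port index.
+ */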
+ port->pcie = pcie;
+ port->np = np;
+
+ port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
+ if (IS_ERR(port->base))
+ return PTR_ERR(port->base);
+
+ rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
+
+ /* Assert PERST# before setting up the clock */
+ gpiod_set_value(reset, 1);
+
+ ret = apple_pcie_setup_refclk(pcie, port);
+ if (ret < 0)
+ return ret;
+
+ /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
+ usleep_range(100, 200);
+
+ /* Deassert PERST# */
+ rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
+ gpiod_set_value(reset, 0);
+
+ /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ msleep(100);
+
+ ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
+ stat & PORT_STATUS_READY, 100, 250000);
+ if (ret < 0) {
+ dev_err(pcie->dev, "port %pOF ready wait timeout\n", np);
+ return ret;
+ }
+
+ rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
+ rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);
+
+ ret = apple_pcie_port_setup_irq(port);
+ if (ret)
+ return ret;
+
+ /* Reset all RID/SID mappings, and check for RAZ/WI registers */
+ for (i = 0; i < MAX_RID2SID; i++) {
+ if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
+ break;
+ apple_pcie_rid2sid_write(port, i, 0);
+ }
+
+ dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i);
+
+ port->sid_map_sz = i;
+
+ list_add_tail(&port->entry, &pcie->ports);
+ init_completion(&pcie->event);
+
+ ret = apple_pcie_port_register_irqs(port);
+ WARN_ON(ret);
+
+ writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);
+
+ if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
+ dev_warn(pcie->dev, "%pOF link didn't come up\n", np);
+
+ return 0;
+}
+
+static int apple_msi_init(struct apple_pcie *pcie)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct of_phandle_args args = {};
+ struct irq_domain *parent;
+ int ret;
+
+ ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
+ "#interrupt-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges",
+ args.args_count + 1, &pcie->nvecs);
+ if (ret)
+ return ret;
+
+ of_phandle_args_to_fwspec(args.np, args.args, args.args_count,
+ &pcie->fwspec);
+
+ pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL);
+ if (!pcie->bitmap)
+ return -ENOMEM;
+
+ parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to find parent domain\n");
+ return -ENXIO;
+ }
+
+ parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
+ &apple_msi_domain_ops, pcie);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
+ parent);
+ if (!pcie->domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
+{
+ struct pci_config_window *cfg = pdev->sysdata;
+ struct apple_pcie *pcie = cfg->priv;
+ struct pci_dev *port_pdev;
+ struct apple_pcie_port *port;
+
+ /* Find the root port this device is on */
+ port_pdev = pcie_find_root_port(pdev);
+
+ /* If the device is the root port itself, there is nothing to do */
+ if (WARN_ON(!port_pdev) || pdev == port_pdev)
+ return NULL;
+
+ list_for_each_entry(port, &pcie->ports, entry) {
+ if (port->idx == PCI_SLOT(port_pdev->devfn))
+ return port;
+ }
+
+ return NULL;
+}
+
+static int apple_pcie_add_device(struct apple_pcie_port *port,
+ struct pci_dev *pdev)
+{
+ u32 sid, rid = pci_dev_id(pdev);
+ int idx, err;
+
+ dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
+ pci_name(pdev->bus->self), port->idx);
+
+ err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map",
+ "iommu-map-mask", NULL, &sid);
+ if (err)
+ return err;
+
+ mutex_lock(&port->pcie->lock);
+
+ idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0);
+ if (idx >= 0) {
+ apple_pcie_rid2sid_write(port, idx,
+ PORT_RID2SID_VALID |
+ (sid << PORT_RID2SID_SID_SHIFT) | rid);
+
+ dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n",
+ rid, sid, idx);
+ }
+
+ mutex_unlock(&port->pcie->lock);
+
+ return idx >= 0 ? 0 : -ENOSPC;
+}
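+
+/*
+ * Editor's worked example (hypothetical endpoint): for a device at
+ * 01:00.0 the RID is 0x0100 (bus << 8 | devfn) and, with stream ID 1,
+ * the mapping entry written above is
+ *
+ *	PORT_RID2SID_VALID | (1 << PORT_RID2SID_SID_SHIFT) | 0x0100
+ *	= 0x80000000 | 0x00010000 | 0x00000100 = 0x80010100
+ */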
+
+static void apple_pcie_release_device(struct apple_pcie_port *port,
+ struct pci_dev *pdev)
+{
+ u32 rid = pci_dev_id(pdev);
+ int idx;
+
+ mutex_lock(&port->pcie->lock);
+
+ for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
+ u32 val;
+
+ val = readl_relaxed(port->base + PORT_RID2SID(idx));
+ if ((val & 0xffff) == rid) {
+ apple_pcie_rid2sid_write(port, idx, 0);
+ bitmap_release_region(port->sid_map, idx, 0);
+ dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx);
+ break;
+ }
+ }
+
+ mutex_unlock(&port->pcie->lock);
+}
+
+static int apple_pcie_bus_notifier(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct apple_pcie_port *port;
+ int err;
+
+ /*
+ * This is a bit ugly. We assume that if we get notified for
+ * any PCI device, we must be in charge of it, and that there
+ * is no other PCI controller in the whole system. It probably
+ * holds for now, but who knows for how long?
+ */
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ err = apple_pcie_add_device(port, pdev);
+ if (err)
+ return notifier_from_errno(err);
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ apple_pcie_release_device(port, pdev);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block apple_pcie_nb = {
+ .notifier_call = apple_pcie_bus_notifier,
+};
+
+static int apple_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *platform = to_platform_device(dev);
+ struct device_node *of_port;
+ struct apple_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = dev;
+
+ mutex_init(&pcie->lock);
+
+ pcie->base = devm_platform_ioremap_resource(platform, 1);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ cfg->priv = pcie;
+ INIT_LIST_HEAD(&pcie->ports);
+
+ ret = apple_msi_init(pcie);
+ if (ret)
+ return ret;
+
+ for_each_child_of_node(dev->of_node, of_port) {
+ ret = apple_pcie_setup_port(pcie, of_port);
+ if (ret) {
+ dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
+ of_node_put(of_port);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int apple_pcie_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
+ if (ret)
+ return ret;
+
+ ret = pci_host_common_probe(pdev);
+ if (ret)
+ bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
+
+ return ret;
+}
+
+static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
+ .init = apple_pcie_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static const struct of_device_id apple_pcie_of_match[] = {
+ { .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
+ { }
+};
+MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
+
+static struct platform_driver apple_pcie_driver = {
+ .probe = apple_pcie_probe,
+ .driver = {
+ .name = "pcie-apple",
+ .of_match_table = apple_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(apple_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
new file mode 100644
index 000000000..f9dd6622f
--- /dev/null
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -0,0 +1,1628 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2009 - 2019 Broadcom */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/printk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "../pci.h"
+
+/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
+#define BRCM_PCIE_CAP_REGS 0x00ac
+
+/* Broadcom STB PCIe Register Offsets */
+#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188
+#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc
+#define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0
+
+#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
+#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
+
+#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
+#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
+
+#define PCIE_RC_DL_MDIO_ADDR 0x1100
+#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
+#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+
+#define PCIE_MISC_MISC_CTRL 0x4008
+#define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80
+#define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400
+#define PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK 0x1000
+#define PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK 0x2000
+#define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK 0x300000
+
+#define PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK 0xf8000000
+#define PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK 0x07c00000
+#define PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK 0x0000001f
+#define SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK
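+/*
+ * Editor's note: SCB_SIZE_MASK(1), for example, token-pastes to
+ * PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK (0x07c00000) above.
+ */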
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO 0x400c
+#define PCIE_MEM_WIN0_LO(win) \
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI 0x4010
+#define PCIE_MEM_WIN0_HI(win) \
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
+
+#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
+#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
+
+#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034
+#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f
+#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038
+
+#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c
+#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f
+
+#define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044
+#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
+
+#define PCIE_MISC_MSI_DATA_CONFIG 0x404c
+#define PCIE_MISC_MSI_DATA_CONFIG_VAL_32 0xffe06540
+#define PCIE_MISC_MSI_DATA_CONFIG_VAL_8 0xfff86540
+
+#define PCIE_MISC_PCIE_CTRL 0x4064
+#define PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK 0x1
+#define PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK 0x4
+
+#define PCIE_MISC_PCIE_STATUS 0x4068
+#define PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK 0x80
+#define PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK 0x20
+#define PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK 0x10
+#define PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK 0x40
+
+#define PCIE_MISC_REVISION 0x406c
+#define BRCM_PCIE_HW_REV_33 0x0303
+#define BRCM_PCIE_HW_REV_3_20 0x0320
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT 0x4070
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK 0xfff00000
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK 0xfff0
+#define PCIE_MEM_WIN0_BASE_LIMIT(win) \
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI 0x4080
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK 0xff
+#define PCIE_MEM_WIN0_BASE_HI(win) \
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI 0x4084
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK 0xff
+#define PCIE_MEM_WIN0_LIMIT_HI(win) \
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
+
+#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
+#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
+#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
+#define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000
+
+#define PCIE_INTR2_CPU_BASE 0x4300
+#define PCIE_MSI_INTR2_BASE 0x4500
+/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
+#define MSI_INT_STATUS 0x0
+#define MSI_INT_CLR 0x8
+#define MSI_INT_MASK_SET 0x10
+#define MSI_INT_MASK_CLR 0x14
+
+#define PCIE_EXT_CFG_DATA 0x8000
+#define PCIE_EXT_CFG_INDEX 0x9000
+
+#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
+#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
+
+#define RGR1_SW_INIT_1_INIT_GENERIC_MASK 0x2
+#define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT 0x1
+#define RGR1_SW_INIT_1_INIT_7278_MASK 0x1
+#define RGR1_SW_INIT_1_INIT_7278_SHIFT 0x0
+
+/* PCIe parameters */
+#define BRCM_NUM_PCIE_OUT_WINS 0x4
+#define BRCM_INT_PCI_MSI_NR 32
+#define BRCM_INT_PCI_MSI_LEGACY_NR 8
+#define BRCM_INT_PCI_MSI_SHIFT 0
+#define BRCM_INT_PCI_MSI_MASK GENMASK(BRCM_INT_PCI_MSI_NR - 1, 0)
+#define BRCM_INT_PCI_MSI_LEGACY_MASK GENMASK(31, \
+ 32 - BRCM_INT_PCI_MSI_LEGACY_NR)
+
+/* MSI target addresses */
+#define BRCM_MSI_TARGET_ADDR_LT_4GB 0x0fffffffcULL
+#define BRCM_MSI_TARGET_ADDR_GT_4GB 0xffffffffcULL
+
+/* MDIO registers */
+#define MDIO_PORT0 0x0
+#define MDIO_DATA_MASK 0x7fffffff
+#define MDIO_PORT_MASK 0xf0000
+#define MDIO_REGAD_MASK 0xffff
+#define MDIO_CMD_MASK 0xfff00000
+#define MDIO_CMD_READ 0x1
+#define MDIO_CMD_WRITE 0x0
+#define MDIO_DATA_DONE_MASK 0x80000000
+#define MDIO_RD_DONE(x) (((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
+#define MDIO_WT_DONE(x) (((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
+#define SSC_REGS_ADDR 0x1100
+#define SET_ADDR_OFFSET 0x1f
+#define SSC_CNTL_OFFSET 0x2
+#define SSC_CNTL_OVRD_EN_MASK 0x8000
+#define SSC_CNTL_OVRD_VAL_MASK 0x4000
+#define SSC_STATUS_OFFSET 0x1
+#define SSC_STATUS_SSC_MASK 0x400
+#define SSC_STATUS_PLL_LOCK_MASK 0x800
+#define PCIE_BRCM_MAX_MEMC 3
+
+#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
+
+/* Rescal registers */
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS 0x3
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK 0x4
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT 0x2
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK 0x2
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT 0x1
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK 0x1
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT 0x0
+
+/* Forward declarations */
+struct brcm_pcie;
+
+enum {
+ RGR1_SW_INIT_1,
+ EXT_CFG_INDEX,
+ EXT_CFG_DATA,
+};
+
+enum {
+ RGR1_SW_INIT_1_INIT_MASK,
+ RGR1_SW_INIT_1_INIT_SHIFT,
+};
+
+enum pcie_type {
+ GENERIC,
+ BCM7425,
+ BCM7435,
+ BCM4908,
+ BCM7278,
+ BCM2711,
+};
+
+struct pcie_cfg_data {
+ const int *offsets;
+ const enum pcie_type type;
+ void (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+};
+
+struct subdev_regulators {
+ unsigned int num_supplies;
+ struct regulator_bulk_data supplies[];
+};
+
+struct brcm_msi {
+ struct device *dev;
+ void __iomem *base;
+ struct device_node *np;
+ struct irq_domain *msi_domain;
+ struct irq_domain *inner_domain;
+ struct mutex lock; /* guards the alloc/free operations */
+ u64 target_addr;
+ int irq;
+ DECLARE_BITMAP(used, BRCM_INT_PCI_MSI_NR);
+ bool legacy;
+ /* Some chips have MSIs in bits [31..24] of a shared register. */
+ int legacy_shift;
+ int nr; /* number of MSIs available, depends on chip */
+ /* This is the base pointer for interrupt status/set/clr regs */
+ void __iomem *intr_base;
+};
+
+/* Internal PCIe Host Controller Information. */
+struct brcm_pcie {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct device_node *np;
+ bool ssc;
+ int gen;
+ u64 msi_target_addr;
+ struct brcm_msi *msi;
+ const int *reg_offsets;
+ enum pcie_type type;
+ struct reset_control *rescal;
+ struct reset_control *perst_reset;
+ int num_memc;
+ u64 memc_size[PCIE_BRCM_MAX_MEMC];
+ u32 hw_rev;
+ void (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ struct subdev_regulators *sr;
+ bool ep_wakeup_capable;
+};
+
+static inline bool is_bmips(const struct brcm_pcie *pcie)
+{
+ return pcie->type == BCM7435 || pcie->type == BCM7425;
+}
+
+/*
+ * This is to convert the size of the inbound "BAR" region to the
+ * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
+ */
+static int brcm_pcie_encode_ibar_size(u64 size)
+{
+ int log2_in = ilog2(size);
+
+ if (log2_in >= 12 && log2_in <= 15)
+ /* Covers 4KB to 32KB (inclusive) */
+ return (log2_in - 12) + 0x1c;
+ else if (log2_in >= 16 && log2_in <= 35)
+ /* Covers 64KB to 32GB, (inclusive) */
+ return log2_in - 15;
+ /* Something is awry so disable */
+ return 0;
+}
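+
+/*
+ * Editor's worked examples for the encoding above:
+ *
+ *	SZ_4K  (log2 = 12) -> 0x1c	SZ_64K (log2 = 16) -> 1
+ *	SZ_32K (log2 = 15) -> 0x1f	SZ_1G  (log2 = 30) -> 15
+ *
+ * Sizes outside 4KB..32KB and 64KB..32GB encode to 0 (disabled).
+ */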
+
+static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
+{
+ u32 pkt = 0;
+
+ pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
+ pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
+ pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
+
+ return pkt;
+}
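+
+/*
+ * Editor's worked example (hypothetical read): a packet addressing
+ * regad SET_ADDR_OFFSET (0x1f) on MDIO_PORT0 composes to
+ *
+ *	FIELD_PREP(MDIO_CMD_MASK, MDIO_CMD_READ)	// 0x00100000
+ *	| FIELD_PREP(MDIO_PORT_MASK, 0)			// 0x00000000
+ *	| FIELD_PREP(MDIO_REGAD_MASK, 0x1f)		// 0x0000001f
+ *	= 0x0010001f
+ */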
+
+/* negative return value indicates error */
+static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
+{
+ u32 data;
+ int err;
+
+ writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
+ base + PCIE_RC_DL_MDIO_ADDR);
+ readl(base + PCIE_RC_DL_MDIO_ADDR);
+ err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_RD_DATA, data,
+ MDIO_RD_DONE(data), 10, 100);
+ *val = FIELD_GET(MDIO_DATA_MASK, data);
+
+ return err;
+}
+
+/* negative return value indicates error */
+static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
+ u8 regad, u16 wrdata)
+{
+ u32 data;
+ int err;
+
+ writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
+ base + PCIE_RC_DL_MDIO_ADDR);
+ readl(base + PCIE_RC_DL_MDIO_ADDR);
+ writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
+
+ err = readw_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
+ MDIO_WT_DONE(data), 10, 100);
+ return err;
+}
+
+/*
+ * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
+ * return value indicates error.
+ */
+static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
+{
+ int pll, ssc;
+ int ret;
+ u32 tmp;
+
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
+ SSC_REGS_ADDR);
+ if (ret < 0)
+ return ret;
+
+ ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
+ SSC_CNTL_OFFSET, &tmp);
+ if (ret < 0)
+ return ret;
+
+ u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
+ u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
+ SSC_CNTL_OFFSET, tmp);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000);
+ ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
+ SSC_STATUS_OFFSET, &tmp);
+ if (ret < 0)
+ return ret;
+
+ ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
+ pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);
+
+ return ssc && pll ? 0 : -EIO;
+}
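+
+/*
+ * Editor's note on the access pattern above: the SSC registers are
+ * reached indirectly. Writing SSC_REGS_ADDR (0x1100) to regad
+ * SET_ADDR_OFFSET (0x1f) selects the register window first; the
+ * subsequent accesses to SSC_CNTL_OFFSET and SSC_STATUS_OFFSET then
+ * address registers within that window.
+ */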
+
+/* Limits operation to a specific generation (1, 2, or 3) */
+static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
+{
+ u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
+ u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+
+ lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
+ writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+
+ lnkctl2 = (lnkctl2 & ~0xf) | gen;
+ writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
+}
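+
+/*
+ * Editor's note: the PCIe link-speed encodings let "gen" be written
+ * directly above: 1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s, placed in
+ * both LNKCAP.SLS and the Target Link Speed nibble of LNKCTL2.
+ */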
+
+static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
+ unsigned int win, u64 cpu_addr,
+ u64 pcie_addr, u64 size)
+{
+ u32 cpu_addr_mb_high, limit_addr_mb_high;
+ phys_addr_t cpu_addr_mb, limit_addr_mb;
+ int high_addr_shift;
+ u32 tmp;
+
+ /* Set the base of the pcie_addr window */
+ writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
+ writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));
+
+ /* Write the addr base & limit lower bits (in MBs) */
+ cpu_addr_mb = cpu_addr / SZ_1M;
+ limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;
+
+ tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
+ u32p_replace_bits(&tmp, cpu_addr_mb,
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
+ u32p_replace_bits(&tmp, limit_addr_mb,
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
+ writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
+
+ if (is_bmips(pcie))
+ return;
+
+ /* Write the cpu & limit addr upper bits */
+ high_addr_shift =
+ HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
+
+ cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
+ tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
+ u32p_replace_bits(&tmp, cpu_addr_mb_high,
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
+ writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
+
+ limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
+ tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
+ u32p_replace_bits(&tmp, limit_addr_mb_high,
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
+ writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
+}
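+
+/*
+ * Editor's worked example (hypothetical window): cpu_addr = 0xc0000000
+ * (3 GB) and size = SZ_64M give cpu_addr_mb = 0xc00 and limit_addr_mb
+ * = 0xc3f. Both fit in the 12-bit BASE/LIMIT fields, so the _HI
+ * registers receive 0 (high_addr_shift = HWEIGHT32(0xfff0) = 12, and
+ * 0xc00 >> 12 == 0).
+ */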
+
+static struct irq_chip brcm_msi_irq_chip = {
+ .name = "BRCM STB PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info brcm_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &brcm_msi_irq_chip,
+};
+
+static void brcm_pcie_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long status;
+ struct brcm_msi *msi;
+ struct device *dev;
+ u32 bit;
+
+ chained_irq_enter(chip, desc);
+ msi = irq_desc_get_handler_data(desc);
+ dev = msi->dev;
+
+ status = readl(msi->intr_base + MSI_INT_STATUS);
+ status >>= msi->legacy_shift;
+
+ for_each_set_bit(bit, &status, msi->nr) {
+ int ret;
+
+ ret = generic_handle_domain_irq(msi->inner_domain, bit);
+ if (ret)
+ dev_dbg(dev, "unexpected MSI\n");
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
+
+ msg->address_lo = lower_32_bits(msi->target_addr);
+ msg->address_hi = upper_32_bits(msi->target_addr);
+ msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
+}
+
+static int brcm_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void brcm_msi_ack_irq(struct irq_data *data)
+{
+ struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
+ const int shift_amt = data->hwirq + msi->legacy_shift;
+
+ writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);
+}
+
+static struct irq_chip brcm_msi_bottom_irq_chip = {
+ .name = "BRCM STB MSI",
+ .irq_compose_msi_msg = brcm_msi_compose_msi_msg,
+ .irq_set_affinity = brcm_msi_set_affinity,
+ .irq_ack = brcm_msi_ack_irq,
+};
+
+static int brcm_msi_alloc(struct brcm_msi *msi, unsigned int nr_irqs)
+{
+ int hwirq;
+
+ mutex_lock(&msi->lock);
+ hwirq = bitmap_find_free_region(msi->used, msi->nr,
+ order_base_2(nr_irqs));
+ mutex_unlock(&msi->lock);
+
+ return hwirq;
+}
+
+static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq,
+ unsigned int nr_irqs)
+{
+ mutex_lock(&msi->lock);
+ bitmap_release_region(msi->used, hwirq, order_base_2(nr_irqs));
+ mutex_unlock(&msi->lock);
+}
+
+static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct brcm_msi *msi = domain->host_data;
+ int hwirq, i;
+
+ hwirq = brcm_msi_alloc(msi, nr_irqs);
+
+ if (hwirq < 0)
+ return hwirq;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &brcm_msi_bottom_irq_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
+ return 0;
+}
+
+static void brcm_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct brcm_msi *msi = irq_data_get_irq_chip_data(d);
+
+ brcm_msi_free(msi, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = brcm_irq_domain_alloc,
+ .free = brcm_irq_domain_free,
+};
+
+static int brcm_allocate_domains(struct brcm_msi *msi)
+{
+ struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
+ struct device *dev = msi->dev;
+
+ msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
+ if (!msi->inner_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &brcm_msi_domain_info,
+ msi->inner_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void brcm_free_domains(struct brcm_msi *msi)
+{
+ irq_domain_remove(msi->msi_domain);
+ irq_domain_remove(msi->inner_domain);
+}
+
+static void brcm_msi_remove(struct brcm_pcie *pcie)
+{
+ struct brcm_msi *msi = pcie->msi;
+
+ if (!msi)
+ return;
+ irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
+ brcm_free_domains(msi);
+}
+
+static void brcm_msi_set_regs(struct brcm_msi *msi)
+{
+ u32 val = msi->legacy ? BRCM_INT_PCI_MSI_LEGACY_MASK :
+ BRCM_INT_PCI_MSI_MASK;
+
+ writel(val, msi->intr_base + MSI_INT_MASK_CLR);
+ writel(val, msi->intr_base + MSI_INT_CLR);
+
+ /*
+ * Bit 0 of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed as an MSI
+ * enable, which we set to 1.
+ */
+ writel(lower_32_bits(msi->target_addr) | 0x1,
+ msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
+ writel(upper_32_bits(msi->target_addr),
+ msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);
+
+ val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32;
+ writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);
+}
+
+static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
+{
+ struct brcm_msi *msi;
+ int irq, ret;
+ struct device *dev = pcie->dev;
+
+ irq = irq_of_parse_and_map(dev->of_node, 1);
+ if (irq <= 0) {
+ dev_err(dev, "cannot map MSI interrupt\n");
+ return -ENODEV;
+ }
+
+ msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
+ if (!msi)
+ return -ENOMEM;
+
+ mutex_init(&msi->lock);
+ msi->dev = dev;
+ msi->base = pcie->base;
+ msi->np = pcie->np;
+ msi->target_addr = pcie->msi_target_addr;
+ msi->irq = irq;
+ msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;
+
+ /*
+ * Sanity check to make sure that the 'used' bitmap in struct brcm_msi
+ * is large enough.
+ */
+ BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
+
+ if (msi->legacy) {
+ msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
+ msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
+ msi->legacy_shift = 24;
+ } else {
+ msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE;
+ msi->nr = BRCM_INT_PCI_MSI_NR;
+ msi->legacy_shift = 0;
+ }
+
+ ret = brcm_allocate_domains(msi);
+ if (ret)
+ return ret;
+
+ irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);
+
+ brcm_msi_set_regs(msi);
+ pcie->msi = msi;
+
+ return 0;
+}
+
+/* The controller is capable of serving in both RC and EP roles */
+static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
+{
+ void __iomem *base = pcie->base;
+ u32 val = readl(base + PCIE_MISC_PCIE_STATUS);
+
+ return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
+}
+
+static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
+{
+ u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
+ u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
+ u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);
+
+ return dla && plu;
+}
+
+static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ void __iomem *base = pcie->base;
+ int idx;
+
+ /* Accesses to the RC go right to the RC registers if !devfn */
+ if (pci_is_root_bus(bus))
+ return devfn ? NULL : base + PCIE_ECAM_REG(where);
+
+ /* An access to our HW w/o link-up will cause a CPU Abort */
+ if (!brcm_pcie_link_up(pcie))
+ return NULL;
+
+ /* For devices, write to the config space index register */
+ idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
+ writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
+ return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
+}
+
+static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ void __iomem *base = pcie->base;
+ int idx;
+
+ /* Accesses to the RC go right to the RC registers if !devfn */
+ if (pci_is_root_bus(bus))
+ return devfn ? NULL : base + PCIE_ECAM_REG(where);
+
+ /* An access to our HW w/o link-up will cause a CPU Abort */
+ if (!brcm_pcie_link_up(pcie))
+ return NULL;
+
+ /* For devices, write to the config space index register */
+ idx = PCIE_ECAM_OFFSET(bus->number, devfn, where);
+ writel(idx, base + IDX_ADDR(pcie));
+ return base + DATA_ADDR(pcie);
+}
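+
+/*
+ * Editor's worked example for the index/data scheme used by both
+ * map_bus variants above (hypothetical access): with
+ * brcm_pcie_map_bus, reading register 0x10 of device 01:00.0 first
+ * writes PCIE_ECAM_OFFSET(1, 0, 0) = 0x100000 to PCIE_EXT_CFG_INDEX,
+ * then reads from base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(0x10).
+ */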
+
+static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+{
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
+ u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
+
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+ tmp = (tmp & ~mask) | ((val << shift) & mask);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+}
+
+static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+{
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
+ u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
+
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+ tmp = (tmp & ~mask) | ((val << shift) & mask);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+}
+
+static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
+{
+ if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
+ return;
+
+ if (val)
+ reset_control_assert(pcie->perst_reset);
+ else
+ reset_control_deassert(pcie->perst_reset);
+}
+
+static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
+{
+ u32 tmp;
+
+ /* Perst bit has moved and assert value is 0 */
+ tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
+ u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
+ writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
+}
+
+static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+ u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+}
+
+static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
+ u64 *rc_bar2_size,
+ u64 *rc_bar2_offset)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ struct resource_entry *entry;
+ struct device *dev = pcie->dev;
+ u64 lowest_pcie_addr = ~(u64)0;
+ int ret, i = 0;
+ u64 size = 0;
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ u64 pcie_beg = entry->res->start - entry->offset;
+
+ size += entry->res->end - entry->res->start + 1;
+ if (pcie_beg < lowest_pcie_addr)
+ lowest_pcie_addr = pcie_beg;
+ }
+
+ if (lowest_pcie_addr == ~(u64)0) {
+ dev_err(dev, "DT node has no dma-ranges\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
+ PCIE_BRCM_MAX_MEMC);
+
+ if (ret <= 0) {
+ /* Make an educated guess */
+ pcie->num_memc = 1;
+ pcie->memc_size[0] = 1ULL << fls64(size - 1);
+ } else {
+ pcie->num_memc = ret;
+ }
+
+ /* Each memc is viewed through a "port" that is a power of 2 */
+ for (i = 0, size = 0; i < pcie->num_memc; i++)
+ size += pcie->memc_size[i];
+
+ /* System memory starts at this address in PCIe-space */
+ *rc_bar2_offset = lowest_pcie_addr;
+ /* The sum of all memc views must also be a power of 2 */
+ *rc_bar2_size = 1ULL << fls64(size - 1);
+
+ /*
+ * We validate the inbound memory view even though we should trust
+ * whatever the device-tree provides. This is because of an HW issue on
+ * early Raspberry Pi 4 revisions (bcm2711). It turns out its
+ * firmware has to dynamically edit dma-ranges due to a bug on the
+ * PCIe controller integration, which prohibits any access above the
+ * lower 3GB of memory. Given this, we decided to keep the dma-ranges
+ * in check, avoiding hard to debug device-tree related issues in the
+ * future:
+ *
+ * The PCIe host controller by design must set the inbound viewport to
+ * be a contiguous arrangement of all of the system's memory. In
+ * addition, its size must be a power of two. To further complicate
+ * matters, the viewport must start on a pcie-address that is aligned
+ * on a multiple of its size. If a portion of the viewport does not
+ * represent system memory -- e.g. 3GB of memory requires a 4GB
+ * viewport -- we can map the outbound memory in or after 3GB and even
+ * though the viewport will overlap the outbound memory the controller
+ * will know to send outbound memory downstream and everything else
+ * upstream.
+ *
+ * For example:
+ *
+ * - The best-case scenario, memory up to 3GB, is to place the inbound
+ * region in the first 4GB of pcie-space, as some legacy devices can
+ * only address 32bits. We would also like to put the MSI under 4GB
+ * as well, since some devices require a 32bit MSI target address.
+ *
+ * - If the system memory is 4GB or larger we cannot start the inbound
+ * region at location 0 (since we have to allow some space for
+ * outbound memory @ 3GB). So instead it will start at the 1x
+ * multiple of its size
+ */
+ if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
+ (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
+ dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
+ *rc_bar2_size, *rc_bar2_offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
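+
+/*
+ * Editor's worked example (hypothetical board): a single 3 GB
+ * dma-range starting at PCIe address 0 with no "brcm,scb-sizes"
+ * property yields memc_size[0] = 1ULL << fls64(3 GB - 1) = 4 GB, so
+ * rc_bar2_size becomes 4 GB with rc_bar2_offset 0, which passes the
+ * alignment check above.
+ */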
+
+static int brcm_pcie_setup(struct brcm_pcie *pcie)
+{
+ u64 rc_bar2_offset, rc_bar2_size;
+ void __iomem *base = pcie->base;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
+ u32 tmp, burst, aspm_support;
+ int num_out_wins = 0;
+ int ret, memc;
+
+ /* Reset the bridge */
+ pcie->bridge_sw_init_set(pcie, 1);
+
+ /* Ensure that PERST# is asserted; some bootloaders may deassert it. */
+ if (pcie->type == BCM2711)
+ pcie->perst_set(pcie, 1);
+
+ usleep_range(100, 200);
+
+ /* Take the bridge out of reset */
+ pcie->bridge_sw_init_set(pcie, 0);
+
+ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ if (is_bmips(pcie))
+ tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
+ else
+ tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
+ writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ /* Wait for SerDes to be stable */
+ usleep_range(100, 200);
+
+ /*
+ * SCB_MAX_BURST_SIZE is a two bit field. For GENERIC chips it
+ * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
+ * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
+ */
+ if (is_bmips(pcie))
+ burst = 0x1; /* 256 bytes */
+ else if (pcie->type == BCM2711)
+ burst = 0x0; /* 128 bytes */
+ else if (pcie->type == BCM7278)
+ burst = 0x3; /* 512 bytes */
+ else
+ burst = 0x2; /* 512 bytes */
+
+ /*
+ * Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN,
+ * RCB_MPS_MODE, RCB_64B_MODE
+ */
+ tmp = readl(base + PCIE_MISC_MISC_CTRL);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
+ u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
+ writel(tmp, base + PCIE_MISC_MISC_CTRL);
+
+ ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
+ &rc_bar2_offset);
+ if (ret)
+ return ret;
+
+ tmp = lower_32_bits(rc_bar2_offset);
+ u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
+ PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
+ writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
+ writel(upper_32_bits(rc_bar2_offset),
+ base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+
+ tmp = readl(base + PCIE_MISC_MISC_CTRL);
+ for (memc = 0; memc < pcie->num_memc; memc++) {
+ u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15;
+
+ if (memc == 0)
+ u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0));
+ else if (memc == 1)
+ u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1));
+ else if (memc == 2)
+ u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2));
+ }
+ writel(tmp, base + PCIE_MISC_MISC_CTRL);
+
+ /*
+ * We ideally want the MSI target address to be located in the 32bit
+ * addressable memory area. Some devices might depend on it. This is
+ * possible either when the inbound window is located above the lower
+ * 4GB or when the inbound area is smaller than 4GB (taking into
+ * account the rounding-up we're forced to perform).
+ */
+ if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
+ pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
+ else
+ pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
+
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
+
+ /* disable the PCIe->GISB memory window (RC_BAR1) */
+ tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
+ tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
+ writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
+
+ /* disable the PCIe->SCB memory window (RC_BAR3) */
+ tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
+ tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
+ writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
+
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
+ /*
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-to-PCIe bridge (the default setting is to be EP mode).
+ */
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ u32p_replace_bits(&tmp, 0x060400,
+ PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+
+ bridge = pci_host_bridge_from_priv(pcie);
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ struct resource *res = entry->res;
+
+ if (resource_type(res) != IORESOURCE_MEM)
+ continue;
+
+ if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
+ dev_err(pcie->dev, "too many outbound wins\n");
+ return -EINVAL;
+ }
+
+ if (is_bmips(pcie)) {
+ u64 start = res->start;
+ unsigned int j, nwins = resource_size(res) / SZ_128M;
+
+ /* bmips PCIe outbound windows have a 128MB max size */
+ if (nwins > BRCM_NUM_PCIE_OUT_WINS)
+ nwins = BRCM_NUM_PCIE_OUT_WINS;
+ for (j = 0; j < nwins; j++, start += SZ_128M)
+ brcm_pcie_set_outbound_win(pcie, j, start,
+ start - entry->offset,
+ SZ_128M);
+ break;
+ }
+ brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
+ res->start - entry->offset,
+ resource_size(res));
+ num_out_wins++;
+ }
+
+ /* PCIe->SCB endian mode for BAR */
+ tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
+ writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+
+ return 0;
+}
+
+static int brcm_pcie_start_link(struct brcm_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ void __iomem *base = pcie->base;
+ u16 nlw, cls, lnksta;
+ bool ssc_good = false;
+ u32 tmp;
+ int ret, i;
+
+	/* Deassert the fundamental reset */
+ pcie->perst_set(pcie, 0);
+
+ /*
+	 * Wait for 100 ms after PERST# deassertion; see PCIe CEM specification
+	 * section 2.2 and PCIe r5.0 section 6.6.1.
+ */
+ msleep(100);
+
+ /*
+	 * Give the RC/EP even more time to wake up before trying to
+	 * configure the RC. Poll the link-up status every 5 ms, up to a
+	 * total of 100 ms.
+ */
+ for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
+ msleep(5);
+
+ if (!brcm_pcie_link_up(pcie)) {
+ dev_err(dev, "link down\n");
+ return -ENODEV;
+ }
+
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
+
+ if (pcie->ssc) {
+ ret = brcm_pcie_set_ssc(pcie);
+ if (ret == 0)
+ ssc_good = true;
+ else
+ dev_err(dev, "failed attempt to enter ssc mode\n");
+ }
+
+ lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
+ cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
+ nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+ dev_info(dev, "link up, %s x%u %s\n",
+ pci_speed_string(pcie_link_speed[cls]), nlw,
+ ssc_good ? "(SSC)" : "(!SSC)");
+
+ /*
+	 * The refclk from the RC should be gated with the CLKREQ# input when
+	 * ASPM L0s/L1 is enabled, so set the CLKREQ_DEBUG_ENABLE field to 1.
+ */
+ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
+ writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+ return 0;
+}
+
+static const char * const supplies[] = {
+ "vpcie3v3",
+ "vpcie3v3aux",
+ "vpcie12v",
+};
+
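+/*
+ * Note: struct subdev_regulators (defined earlier in this file) is assumed
+ * to end in a flexible array of regulator_bulk_data entries, which is why
+ * alloc_subdev_regulators() sizes the struct and the array together.
+ */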
+static void *alloc_subdev_regulators(struct device *dev)
+{
+ const size_t size = sizeof(struct subdev_regulators) +
+ sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
+ struct subdev_regulators *sr;
+ int i;
+
+ sr = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (sr) {
+ sr->num_supplies = ARRAY_SIZE(supplies);
+ for (i = 0; i < ARRAY_SIZE(supplies); i++)
+ sr->supplies[i].supply = supplies[i];
+ }
+
+ return sr;
+}
+
+static int brcm_pcie_add_bus(struct pci_bus *bus)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ struct device *dev = &bus->dev;
+ struct subdev_regulators *sr;
+ int ret;
+
+ if (!bus->parent || !pci_is_root_bus(bus->parent))
+ return 0;
+
+ if (dev->of_node) {
+ sr = alloc_subdev_regulators(dev);
+ if (!sr) {
+ dev_info(dev, "Can't allocate regulators for downstream device\n");
+ goto no_regulators;
+ }
+
+ pcie->sr = sr;
+
+ ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
+		if (ret) {
+			dev_info(dev, "No regulators for downstream device\n");
+			pcie->sr = NULL;
+			goto no_regulators;
+		}
+
+ ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_err(dev, "Can't enable regulators for downstream device\n");
+ regulator_bulk_free(sr->num_supplies, sr->supplies);
+ pcie->sr = NULL;
+ }
+ }
+
+no_regulators:
+ brcm_pcie_start_link(pcie);
+ return 0;
+}
+
+static void brcm_pcie_remove_bus(struct pci_bus *bus)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ struct subdev_regulators *sr = pcie->sr;
+ struct device *dev = &bus->dev;
+
+ if (!sr)
+ return;
+
+ if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
+ dev_err(dev, "Failed to disable regulators for downstream device\n");
+ regulator_bulk_free(sr->num_supplies, sr->supplies);
+ pcie->sr = NULL;
+}
+
+/* L23 is a low-power PCIe link state */
+static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
+{
+ void __iomem *base = pcie->base;
+ int l23, i;
+ u32 tmp;
+
+ /* Assert request for L23 */
+ tmp = readl(base + PCIE_MISC_PCIE_CTRL);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
+ writel(tmp, base + PCIE_MISC_PCIE_CTRL);
+
+ /* Wait up to 36 msec for L23 */
+ tmp = readl(base + PCIE_MISC_PCIE_STATUS);
+ l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
+ for (i = 0; i < 15 && !l23; i++) {
+ usleep_range(2000, 2400);
+ tmp = readl(base + PCIE_MISC_PCIE_STATUS);
+ l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
+ tmp);
+ }
+
+ if (!l23)
+ dev_err(pcie->dev, "failed to enter low-power link state\n");
+}
+
+static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
+{
+ static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,};
+ static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,};
+ const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1;
+ const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1;
+ u32 tmp, combined_mask = 0;
+ u32 val;
+ void __iomem *base = pcie->base;
+ int i, ret;
+
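+	/*
+	 * Power-up walks the PWRDN/RESET/DIG_RESET fields forward and sets
+	 * them; power-down walks the same fields in reverse and clears them,
+	 * so the PHY is quiesced in the opposite order it was brought up.
+	 */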
+ for (i = beg; i != end; start ? i++ : i--) {
+ val = start ? BIT_MASK(shifts[i]) : 0;
+ tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
+ tmp = (tmp & ~masks[i]) | (val & masks[i]);
+ writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
+ usleep_range(50, 200);
+ combined_mask |= masks[i];
+ }
+
+ tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
+ val = start ? combined_mask : 0;
+
+ ret = (tmp & combined_mask) == val ? 0 : -EIO;
+ if (ret)
+ dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop"));
+
+ return ret;
+}
+
+static inline int brcm_phy_start(struct brcm_pcie *pcie)
+{
+ return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
+}
+
+static inline int brcm_phy_stop(struct brcm_pcie *pcie)
+{
+ return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
+}
+
+static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
+{
+ void __iomem *base = pcie->base;
+ int tmp;
+
+ if (brcm_pcie_link_up(pcie))
+ brcm_pcie_enter_l23(pcie);
+ /* Assert fundamental reset */
+ pcie->perst_set(pcie, 1);
+
+ /* Deassert request for L23 in case it was asserted */
+ tmp = readl(base + PCIE_MISC_PCIE_CTRL);
+ u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
+ writel(tmp, base + PCIE_MISC_PCIE_CTRL);
+
+ /* Turn off SerDes */
+ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
+ writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+ /* Shutdown PCIe bridge */
+ pcie->bridge_sw_init_set(pcie, 1);
+}
+
+static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
+{
+ bool *ret = data;
+
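+	/* A nonzero return value makes pci_walk_bus() stop iterating early */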
+ if (device_may_wakeup(&dev->dev)) {
+ *ret = true;
+ dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n");
+ }
+ return (int) *ret;
+}
+
+static int brcm_pcie_suspend_noirq(struct device *dev)
+{
+ struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ int ret;
+
+ brcm_pcie_turn_off(pcie);
+ /*
+	 * If brcm_phy_stop() returns an error, just log it; propagating the
+	 * error would fail the whole suspend for a condition that will most
+	 * likely be cleared on resume.
+ */
+ if (brcm_phy_stop(pcie))
+ dev_err(dev, "Could not stop phy for suspend\n");
+
+ ret = reset_control_rearm(pcie->rescal);
+ if (ret) {
+ dev_err(dev, "Could not rearm rescal reset\n");
+ return ret;
+ }
+
+ if (pcie->sr) {
+ /*
+ * Now turn off the regulators, but if at least one
+ * downstream device is enabled as a wake-up source, do not
+ * turn off regulators.
+ */
+ pcie->ep_wakeup_capable = false;
+ pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
+ &pcie->ep_wakeup_capable);
+ if (!pcie->ep_wakeup_capable) {
+ ret = regulator_bulk_disable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn off regulators\n");
+ reset_control_reset(pcie->rescal);
+ return ret;
+ }
+ }
+ }
+ clk_disable_unprepare(pcie->clk);
+
+ return 0;
+}
+
+static int brcm_pcie_resume_noirq(struct device *dev)
+{
+ struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ void __iomem *base;
+ u32 tmp;
+ int ret;
+
+ base = pcie->base;
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_reset(pcie->rescal);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = brcm_phy_start(pcie);
+ if (ret)
+ goto err_reset;
+
+ /* Take bridge out of reset so we can access the SERDES reg */
+ pcie->bridge_sw_init_set(pcie, 0);
+
+ /* SERDES_IDDQ = 0 */
+ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
+ writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+ /* wait for serdes to be stable */
+ udelay(100);
+
+ ret = brcm_pcie_setup(pcie);
+ if (ret)
+ goto err_reset;
+
+ if (pcie->sr) {
+ if (pcie->ep_wakeup_capable) {
+ /*
+			 * We are resuming from a suspend during which the
+			 * power supplies were left on, so there is no need
+			 * to enable them again (which would falsely increase
+			 * their usage count).
+ */
+ pcie->ep_wakeup_capable = false;
+ } else {
+ ret = regulator_bulk_enable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn on regulators\n");
+ goto err_reset;
+ }
+ }
+ }
+
+ ret = brcm_pcie_start_link(pcie);
+ if (ret)
+ goto err_regulator;
+
+ if (pcie->msi)
+ brcm_msi_set_regs(pcie->msi);
+
+ return 0;
+
+err_regulator:
+ if (pcie->sr)
+ regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
+err_reset:
+ reset_control_rearm(pcie->rescal);
+err_disable_clk:
+ clk_disable_unprepare(pcie->clk);
+ return ret;
+}
+
+static void __brcm_pcie_remove(struct brcm_pcie *pcie)
+{
+ brcm_msi_remove(pcie);
+ brcm_pcie_turn_off(pcie);
+ if (brcm_phy_stop(pcie))
+ dev_err(pcie->dev, "Could not stop phy\n");
+ if (reset_control_rearm(pcie->rescal))
+ dev_err(pcie->dev, "Could not rearm rescal reset\n");
+ clk_disable_unprepare(pcie->clk);
+}
+
+static void brcm_pcie_remove(struct platform_device *pdev)
+{
+ struct brcm_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ __brcm_pcie_remove(pcie);
+}
+
+static const int pcie_offsets[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const int pcie_offsets_bmips_7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+};
+
+static const struct pcie_cfg_data generic_cfg = {
+ .offsets = pcie_offsets,
+ .type = GENERIC,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bmips_7425,
+ .type = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM7435,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm4908_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM4908,
+ .perst_set = brcm_pcie_perst_set_4908,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const int pcie_offset_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const struct pcie_cfg_data bcm7278_cfg = {
+ .offsets = pcie_offset_bcm7278,
+ .type = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+};
+
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM2711,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct of_device_id brcm_pcie_match[] = {
+ { .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
+ { .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
+ { .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
+ { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
+ { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
+ { .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
+ {},
+};
+
+static struct pci_ops brcm_pcie_ops = {
+ .map_bus = brcm_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = brcm_pcie_remove_bus,
+};
+
+static struct pci_ops brcm7425_pcie_ops = {
+ .map_bus = brcm7425_pcie_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = brcm_pcie_remove_bus,
+};
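+/*
+ * The BCM7425 variant is assumed to tolerate only full 32-bit config space
+ * accesses, hence the pci_generic_config_{read,write}32 accessors above
+ * instead of the sub-word-capable defaults.
+ */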
+
+static int brcm_pcie_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node, *msi_np;
+ struct pci_host_bridge *bridge;
+ const struct pcie_cfg_data *data;
+ struct brcm_pcie *pcie;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ pr_err("failed to look up compatible string\n");
+ return -EINVAL;
+ }
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->dev = &pdev->dev;
+ pcie->np = np;
+ pcie->reg_offsets = data->offsets;
+ pcie->type = data->type;
+ pcie->perst_set = data->perst_set;
+ pcie->bridge_sw_init_set = data->bridge_sw_init_set;
+
+ pcie->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ ret = of_pci_get_max_link_speed(np);
+ pcie->gen = (ret < 0) ? 0 : ret;
+
+ pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable clock\n");
+ return ret;
+ }
+ pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
+ if (IS_ERR(pcie->rescal)) {
+ clk_disable_unprepare(pcie->clk);
+ return PTR_ERR(pcie->rescal);
+ }
+ pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
+ if (IS_ERR(pcie->perst_reset)) {
+ clk_disable_unprepare(pcie->clk);
+ return PTR_ERR(pcie->perst_reset);
+ }
+
+ ret = reset_control_reset(pcie->rescal);
+ if (ret)
+ dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+
+ ret = brcm_phy_start(pcie);
+ if (ret) {
+ reset_control_rearm(pcie->rescal);
+ clk_disable_unprepare(pcie->clk);
+ return ret;
+ }
+
+ ret = brcm_pcie_setup(pcie);
+ if (ret)
+ goto fail;
+
+ pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
+ if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
+ if (pci_msi_enabled() && msi_np == pcie->np) {
+ ret = brcm_pcie_enable_msi(pcie);
+ if (ret) {
+			dev_err(pcie->dev, "probe of internal MSI failed\n");
+ goto fail;
+ }
+ }
+
+ bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
+ bridge->sysdata = pcie;
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = pci_host_probe(bridge);
+ if (!ret && !brcm_pcie_link_up(pcie))
+ ret = -ENODEV;
+
+ if (ret) {
+ brcm_pcie_remove(pdev);
+ return ret;
+ }
+
+ return 0;
+
+fail:
+ __brcm_pcie_remove(pcie);
+ return ret;
+}
+
+MODULE_DEVICE_TABLE(of, brcm_pcie_match);
+
+static const struct dev_pm_ops brcm_pcie_pm_ops = {
+ .suspend_noirq = brcm_pcie_suspend_noirq,
+ .resume_noirq = brcm_pcie_resume_noirq,
+};
+
+static struct platform_driver brcm_pcie_driver = {
+ .probe = brcm_pcie_probe,
+ .remove_new = brcm_pcie_remove,
+ .driver = {
+ .name = "brcm-pcie",
+ .of_match_table = brcm_pcie_match,
+ .pm = &brcm_pcie_pm_ops,
+ },
+};
+module_platform_driver(brcm_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
+MODULE_AUTHOR("Broadcom");
diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c
new file mode 100644
index 000000000..ad9d5ffcd
--- /dev/null
+++ b/drivers/pci/controller/pcie-hisi-error.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for handling the PCIe controller errors on
+ * HiSilicon HIP SoCs.
+ *
+ * Copyright (c) 2020 HiSilicon Limited.
+ */
+
+#include <linux/acpi.h>
+#include <acpi/ghes.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+
+/* HISI PCIe controller error definitions */
+#define HISI_PCIE_ERR_MISC_REGS 33
+
+#define HISI_PCIE_LOCAL_VALID_VERSION BIT(0)
+#define HISI_PCIE_LOCAL_VALID_SOC_ID BIT(1)
+#define HISI_PCIE_LOCAL_VALID_SOCKET_ID BIT(2)
+#define HISI_PCIE_LOCAL_VALID_NIMBUS_ID BIT(3)
+#define HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID BIT(4)
+#define HISI_PCIE_LOCAL_VALID_CORE_ID BIT(5)
+#define HISI_PCIE_LOCAL_VALID_PORT_ID BIT(6)
+#define HISI_PCIE_LOCAL_VALID_ERR_TYPE BIT(7)
+#define HISI_PCIE_LOCAL_VALID_ERR_SEVERITY BIT(8)
+#define HISI_PCIE_LOCAL_VALID_ERR_MISC 9
+
+static guid_t hisi_pcie_sec_guid =
+ GUID_INIT(0xB2889FC9, 0xE7D7, 0x4F9D,
+ 0xA8, 0x67, 0xAF, 0x42, 0xE9, 0x8B, 0xE7, 0x72);
+
+/*
+ * Firmware reports the socket port ID where the error occurred. These
+ * macros convert that to the core ID and core port ID required by the
+ * ACPI reset method.
+ */
+#define HISI_PCIE_PORT_ID(core, v) (((v) >> 1) + ((core) << 3))
+#define HISI_PCIE_CORE_ID(v) ((v) >> 3)
+#define HISI_PCIE_CORE_PORT_ID(v) (((v) & 7) << 1)
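+/*
+ * Worked example: socket port_id 9 decodes to core 9 >> 3 = 1 and core port
+ * (9 & 7) << 1 = 2; HISI_PCIE_PORT_ID(1, 2) = (2 >> 1) + (1 << 3) = 9
+ * recombines them.
+ */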
+
+struct hisi_pcie_error_data {
+ u64 val_bits;
+ u8 version;
+ u8 soc_id;
+ u8 socket_id;
+ u8 nimbus_id;
+ u8 sub_module_id;
+ u8 core_id;
+ u8 port_id;
+ u8 err_severity;
+ u16 err_type;
+ u8 reserv[2];
+ u32 err_misc[HISI_PCIE_ERR_MISC_REGS];
+};
+
+struct hisi_pcie_error_private {
+ struct notifier_block nb;
+ struct device *dev;
+};
+
+enum hisi_pcie_submodule_id {
+ HISI_PCIE_SUB_MODULE_ID_AP,
+ HISI_PCIE_SUB_MODULE_ID_TL,
+ HISI_PCIE_SUB_MODULE_ID_MAC,
+ HISI_PCIE_SUB_MODULE_ID_DL,
+ HISI_PCIE_SUB_MODULE_ID_SDI,
+};
+
+static const char * const hisi_pcie_sub_module[] = {
+ [HISI_PCIE_SUB_MODULE_ID_AP] = "AP Layer",
+ [HISI_PCIE_SUB_MODULE_ID_TL] = "TL Layer",
+ [HISI_PCIE_SUB_MODULE_ID_MAC] = "MAC Layer",
+ [HISI_PCIE_SUB_MODULE_ID_DL] = "DL Layer",
+ [HISI_PCIE_SUB_MODULE_ID_SDI] = "SDI Layer",
+};
+
+enum hisi_pcie_err_severity {
+ HISI_PCIE_ERR_SEV_RECOVERABLE,
+ HISI_PCIE_ERR_SEV_FATAL,
+ HISI_PCIE_ERR_SEV_CORRECTED,
+ HISI_PCIE_ERR_SEV_NONE,
+};
+
+static const char * const hisi_pcie_error_sev[] = {
+ [HISI_PCIE_ERR_SEV_RECOVERABLE] = "recoverable",
+ [HISI_PCIE_ERR_SEV_FATAL] = "fatal",
+ [HISI_PCIE_ERR_SEV_CORRECTED] = "corrected",
+ [HISI_PCIE_ERR_SEV_NONE] = "none",
+};
+
+static const char *hisi_pcie_get_string(const char * const *array,
+ size_t n, u32 id)
+{
+	if (id < n && array[id])
+		return array[id];
+
+ return "unknown";
+}
+
+static int hisi_pcie_port_reset(struct platform_device *pdev,
+ u32 chip_id, u32 port_id)
+{
+ struct device *dev = &pdev->dev;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ union acpi_object arg[3];
+ struct acpi_object_list arg_list;
+ acpi_status s;
+ unsigned long long data = 0;
+
+ arg[0].type = ACPI_TYPE_INTEGER;
+ arg[0].integer.value = chip_id;
+ arg[1].type = ACPI_TYPE_INTEGER;
+ arg[1].integer.value = HISI_PCIE_CORE_ID(port_id);
+ arg[2].type = ACPI_TYPE_INTEGER;
+ arg[2].integer.value = HISI_PCIE_CORE_PORT_ID(port_id);
+
+ arg_list.count = 3;
+ arg_list.pointer = arg;
+
+ s = acpi_evaluate_integer(handle, "RST", &arg_list, &data);
+ if (ACPI_FAILURE(s)) {
+ dev_err(dev, "No RST method\n");
+ return -EIO;
+ }
+
+ if (data) {
+		dev_err(dev, "Failed to reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hisi_pcie_port_do_recovery(struct platform_device *dev,
+ u32 chip_id, u32 port_id)
+{
+ acpi_status s;
+ struct device *device = &dev->dev;
+ acpi_handle root_handle = ACPI_HANDLE(device);
+ struct acpi_pci_root *pci_root;
+ struct pci_bus *root_bus;
+ struct pci_dev *pdev;
+ u32 domain, busnr, devfn;
+
+ s = acpi_get_parent(root_handle, &root_handle);
+ if (ACPI_FAILURE(s))
+ return -ENODEV;
+ pci_root = acpi_pci_find_root(root_handle);
+ if (!pci_root)
+ return -ENODEV;
+ root_bus = pci_root->bus;
+ domain = pci_root->segment;
+
+ busnr = root_bus->number;
+ devfn = PCI_DEVFN(port_id, 0);
+ pdev = pci_get_domain_bus_and_slot(domain, busnr, devfn);
+ if (!pdev) {
+		dev_info(device, "Failed to get root port %04x:%02x:%02x.%d device\n",
+ domain, busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ return -ENODEV;
+ }
+
+ pci_stop_and_remove_bus_device_locked(pdev);
+ pci_dev_put(pdev);
+
+ if (hisi_pcie_port_reset(dev, chip_id, port_id))
+ return -EIO;
+
+ /*
+	 * Subordinate devices must be ready no more than 1s after a hot
+	 * reset, as required by PCIe r5.0 sec 6.6.1. Readiness Notifications
+	 * can shorten that time, but wait the full 1s here to cover all
+	 * conditions.
+ */
+ ssleep(1UL);
+
+ /* add root port and downstream devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(root_bus);
+ pci_unlock_rescan_remove();
+
+ return 0;
+}
+
+static void hisi_pcie_handle_error(struct platform_device *pdev,
+ const struct hisi_pcie_error_data *edata)
+{
+ struct device *dev = &pdev->dev;
+ int idx, rc;
+ const unsigned long valid_bits[] = {BITMAP_FROM_U64(edata->val_bits)};
+
+ if (edata->val_bits == 0) {
+ dev_warn(dev, "%s: no valid error information\n", __func__);
+ return;
+ }
+
+ dev_info(dev, "\nHISI : HIP : PCIe controller error\n");
+	if (edata->val_bits & HISI_PCIE_LOCAL_VALID_VERSION)
+		dev_info(dev, "Table version = %d\n", edata->version);
+	if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOC_ID)
+		dev_info(dev, "SoC ID = %d\n", edata->soc_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOCKET_ID)
+ dev_info(dev, "Socket ID = %d\n", edata->socket_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_NIMBUS_ID)
+ dev_info(dev, "Nimbus ID = %d\n", edata->nimbus_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID)
+ dev_info(dev, "Sub Module = %s\n",
+ hisi_pcie_get_string(hisi_pcie_sub_module,
+ ARRAY_SIZE(hisi_pcie_sub_module),
+ edata->sub_module_id));
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_CORE_ID)
+ dev_info(dev, "Core ID = core%d\n", edata->core_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_PORT_ID)
+ dev_info(dev, "Port ID = port%d\n", edata->port_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_SEVERITY)
+ dev_info(dev, "Error severity = %s\n",
+ hisi_pcie_get_string(hisi_pcie_error_sev,
+ ARRAY_SIZE(hisi_pcie_error_sev),
+ edata->err_severity));
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_TYPE)
+ dev_info(dev, "Error type = 0x%x\n", edata->err_type);
+
+ dev_info(dev, "Reg Dump:\n");
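+	/*
+	 * err_misc[k] is valid iff bit (HISI_PCIE_LOCAL_VALID_ERR_MISC + k)
+	 * of val_bits is set, hence the index arithmetic below.
+	 */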
+ idx = HISI_PCIE_LOCAL_VALID_ERR_MISC;
+ for_each_set_bit_from(idx, valid_bits,
+ HISI_PCIE_LOCAL_VALID_ERR_MISC + HISI_PCIE_ERR_MISC_REGS)
+ dev_info(dev, "ERR_MISC_%d = 0x%x\n", idx - HISI_PCIE_LOCAL_VALID_ERR_MISC,
+ edata->err_misc[idx - HISI_PCIE_LOCAL_VALID_ERR_MISC]);
+
+ if (edata->err_severity != HISI_PCIE_ERR_SEV_RECOVERABLE)
+ return;
+
+	/*
+	 * Recover from the PCIe controller error by trying to reset the
+	 * PCIe port.
+	 */
+ rc = hisi_pcie_port_do_recovery(pdev, edata->socket_id,
+ HISI_PCIE_PORT_ID(edata->core_id, edata->port_id));
+ if (rc)
+		dev_info(dev, "failed to do hisi pcie port reset\n");
+}
+
+static int hisi_pcie_notify_error(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct acpi_hest_generic_data *gdata = data;
+ const struct hisi_pcie_error_data *error_data = acpi_hest_get_payload(gdata);
+ struct hisi_pcie_error_private *priv;
+ struct device *dev;
+ struct platform_device *pdev;
+ guid_t err_sec_guid;
+ u8 socket;
+
+ import_guid(&err_sec_guid, gdata->section_type);
+ if (!guid_equal(&err_sec_guid, &hisi_pcie_sec_guid))
+ return NOTIFY_DONE;
+
+ priv = container_of(nb, struct hisi_pcie_error_private, nb);
+ dev = priv->dev;
+
+ if (device_property_read_u8(dev, "socket", &socket))
+ return NOTIFY_DONE;
+
+ if (error_data->socket_id != socket)
+ return NOTIFY_DONE;
+
+ pdev = container_of(dev, struct platform_device, dev);
+ hisi_pcie_handle_error(pdev, error_data);
+
+ return NOTIFY_OK;
+}
+
+static int hisi_pcie_error_handler_probe(struct platform_device *pdev)
+{
+ struct hisi_pcie_error_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->nb.notifier_call = hisi_pcie_notify_error;
+ priv->dev = &pdev->dev;
+ ret = ghes_register_vendor_record_notifier(&priv->nb);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to register hisi pcie controller error handler with apei\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static void hisi_pcie_error_handler_remove(struct platform_device *pdev)
+{
+ struct hisi_pcie_error_private *priv = platform_get_drvdata(pdev);
+
+ ghes_unregister_vendor_record_notifier(&priv->nb);
+}
+
+static const struct acpi_device_id hisi_pcie_acpi_match[] = {
+ { "HISI0361", 0 },
+ { }
+};
+
+static struct platform_driver hisi_pcie_error_handler_driver = {
+ .driver = {
+ .name = "hisi-pcie-error-handler",
+ .acpi_match_table = hisi_pcie_acpi_match,
+ },
+ .probe = hisi_pcie_error_handler_probe,
+ .remove_new = hisi_pcie_error_handler_remove,
+};
+module_platform_driver(hisi_pcie_error_handler_driver);
+
+MODULE_DESCRIPTION("HiSilicon HIP PCIe controller error handling driver");
diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
new file mode 100644
index 000000000..99a999004
--- /dev/null
+++ b/drivers/pci/controller/pcie-iproc-bcma.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/phy/phy.h>
+#include <linux/bcma/bcma.h>
+#include <linux/ioport.h>
+
+#include "pcie-iproc.h"
+
+
+/* NS: the CLASS field is R/O and is set to the wrong value, 0x200 */
+static void bcma_pcie2_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class);
+
+static int iproc_bcma_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct iproc_pcie *pcie = dev->sysdata;
+ struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev);
+
+ return bcma_core_irq(bdev, 5);
+}
+
+static int iproc_bcma_pcie_probe(struct bcma_device *bdev)
+{
+ struct device *dev = &bdev->dev;
+ struct iproc_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+
+ pcie->dev = dev;
+
+ pcie->type = IPROC_PCIE_PAXB_BCMA;
+ pcie->base = bdev->io_addr;
+ if (!pcie->base) {
+ dev_err(dev, "no controller registers\n");
+ return -ENOMEM;
+ }
+
+ pcie->base_addr = bdev->addr;
+
+ pcie->mem.start = bdev->addr_s[0];
+ pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
+ pcie->mem.name = "PCIe MEM space";
+ pcie->mem.flags = IORESOURCE_MEM;
+ pci_add_resource(&bridge->windows, &pcie->mem);
+ ret = devm_request_pci_bus_resources(dev, &bridge->windows);
+ if (ret)
+ return ret;
+
+ pcie->map_irq = iproc_bcma_pcie_map_irq;
+
+ bcma_set_drvdata(bdev, pcie);
+
+ return iproc_pcie_setup(pcie, &bridge->windows);
+}
+
+static void iproc_bcma_pcie_remove(struct bcma_device *bdev)
+{
+ struct iproc_pcie *pcie = bcma_get_drvdata(bdev);
+
+ iproc_pcie_remove(pcie);
+}
+
+static const struct bcma_device_id iproc_bcma_pcie_table[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ {},
+};
+MODULE_DEVICE_TABLE(bcma, iproc_bcma_pcie_table);
+
+static struct bcma_driver iproc_bcma_pcie_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = iproc_bcma_pcie_table,
+ .probe = iproc_bcma_pcie_probe,
+ .remove = iproc_bcma_pcie_remove,
+};
+module_bcma_driver(iproc_bcma_pcie_driver);
+
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
new file mode 100644
index 000000000..649fcb449
--- /dev/null
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+
+#include "pcie-iproc.h"
+
+#define IPROC_MSI_INTR_EN_SHIFT 11
+#define IPROC_MSI_INTR_EN BIT(IPROC_MSI_INTR_EN_SHIFT)
+#define IPROC_MSI_INT_N_EVENT_SHIFT 1
+#define IPROC_MSI_INT_N_EVENT BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
+#define IPROC_MSI_EQ_EN_SHIFT 0
+#define IPROC_MSI_EQ_EN BIT(IPROC_MSI_EQ_EN_SHIFT)
+
+#define IPROC_MSI_EQ_MASK 0x3f
+
+/* Max number of GIC interrupts */
+#define NR_HW_IRQS 6
+
+/* Number of entries in each event queue */
+#define EQ_LEN 64
+
+/* Size of each event queue memory region */
+#define EQ_MEM_REGION_SIZE SZ_4K
+
+/* Size of each MSI address region */
+#define MSI_MEM_REGION_SIZE SZ_4K
+
+enum iproc_msi_reg {
+ IPROC_MSI_EQ_PAGE = 0,
+ IPROC_MSI_EQ_PAGE_UPPER,
+ IPROC_MSI_PAGE,
+ IPROC_MSI_PAGE_UPPER,
+ IPROC_MSI_CTRL,
+ IPROC_MSI_EQ_HEAD,
+ IPROC_MSI_EQ_TAIL,
+ IPROC_MSI_INTS_EN,
+ IPROC_MSI_REG_SIZE,
+};
+
+struct iproc_msi;
+
+/**
+ * struct iproc_msi_grp - iProc MSI group
+ *
+ * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
+ * event queue.
+ *
+ * @msi: pointer to iProc MSI data
+ * @gic_irq: GIC interrupt
+ * @eq: Event queue number
+ */
+struct iproc_msi_grp {
+ struct iproc_msi *msi;
+ int gic_irq;
+ unsigned int eq;
+};
+
+/**
+ * struct iproc_msi - iProc event queue based MSI
+ *
+ * Only meant to be used on platforms without MSI support integrated into the
+ * GIC.
+ *
+ * @pcie: pointer to iProc PCIe data
+ * @reg_offsets: MSI register offsets
+ * @grps: MSI groups
+ * @nr_irqs: number of total interrupts connected to GIC
+ * @nr_cpus: total number of CPUs
+ * @has_inten_reg: indicates the MSI interrupt enable register needs to be
+ * set explicitly (required for some legacy platforms)
+ * @bitmap: MSI vector bitmap
+ * @bitmap_lock: lock to protect access to the MSI bitmap
+ * @nr_msi_vecs: total number of MSI vectors
+ * @inner_domain: inner IRQ domain
+ * @msi_domain: MSI IRQ domain
+ * @nr_eq_region: required number of 4K-aligned memory regions for MSI event
+ *	queues
+ * @nr_msi_region: required number of 4K-aligned address regions for MSI
+ *	posted writes
+ * @eq_cpu: pointer to allocated memory region for MSI event queues
+ * @eq_dma: DMA address of MSI event queues
+ * @msi_addr: MSI address
+ */
+struct iproc_msi {
+ struct iproc_pcie *pcie;
+ const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
+ struct iproc_msi_grp *grps;
+ int nr_irqs;
+ int nr_cpus;
+ bool has_inten_reg;
+ unsigned long *bitmap;
+ struct mutex bitmap_lock;
+ unsigned int nr_msi_vecs;
+ struct irq_domain *inner_domain;
+ struct irq_domain *msi_domain;
+ unsigned int nr_eq_region;
+ unsigned int nr_msi_region;
+ void *eq_cpu;
+ dma_addr_t eq_dma;
+ phys_addr_t msi_addr;
+};
+
+static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
+ { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
+ { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
+ { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
+ { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
+ { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
+ { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
+};
+
+static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
+ { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
+ { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
+ { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
+ { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
+};
+
+static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
+ enum iproc_msi_reg reg,
+ unsigned int eq)
+{
+ struct iproc_pcie *pcie = msi->pcie;
+
+ return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
+}
+
+static inline void iproc_msi_write_reg(struct iproc_msi *msi,
+ enum iproc_msi_reg reg,
+ int eq, u32 val)
+{
+ struct iproc_pcie *pcie = msi->pcie;
+
+ writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
+}
+
+static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
+{
+ return (hwirq % msi->nr_irqs);
+}
+
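+/*
+ * With a single MSI address region (PAXB), the groups are packed one u32
+ * apart inside it; with one region per group (PAXC), each group gets its
+ * own 4 KB page. The event queue offsets below follow the same pattern.
+ */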
+static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
+ unsigned long hwirq)
+{
+ if (msi->nr_msi_region > 1)
+ return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
+ else
+ return hwirq_to_group(msi, hwirq) * sizeof(u32);
+}
+
+static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
+{
+ if (msi->nr_eq_region > 1)
+ return eq * EQ_MEM_REGION_SIZE;
+ else
+ return eq * EQ_LEN * sizeof(u32);
+}
+
+static struct irq_chip iproc_msi_irq_chip = {
+ .name = "iProc-MSI",
+};
+
+static struct msi_domain_info iproc_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX,
+ .chip = &iproc_msi_irq_chip,
+};
+
+/*
+ * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
+ * dedicated event queue. Each MSI group can support up to 64 MSI vectors.
+ *
+ * The number of MSI groups varies between different iProc SoCs. The total
+ * number of CPU cores also varies. To support MSI IRQ affinity, we
+ * distribute the GIC interrupts across all available CPUs; an MSI vector is
+ * moved from one GIC interrupt to another to steer it to the target CPU.
+ *
+ * Assuming:
+ * - the number of MSI groups is M
+ * - the number of CPU cores is N
+ * - M is always a multiple of N
+ *
+ * Total number of raw MSI vectors = M * 64
+ * Total number of supported MSI vectors = (M * 64) / N
+ */
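+/*
+ * Worked example with illustrative numbers: M = 6 groups and N = 2 CPUs
+ * give 6 * 64 = 384 raw vectors, of which (6 * 64) / 2 = 192 are usable,
+ * since each supported vector reserves one raw slot per CPU for affinity.
+ */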
+static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
+{
+ return (hwirq % msi->nr_cpus);
+}
+
+static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
+ unsigned long hwirq)
+{
+ return (hwirq - hwirq_to_cpu(msi, hwirq));
+}
+
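+/*
+ * Affinity changes are done purely by renumbering: the hwirq slots of one
+ * allocation differ only in their low "CPU" component, and each resulting
+ * group's GIC interrupt is pinned to the matching CPU in
+ * iproc_msi_irq_setup(), so rewriting data->hwirq re-steers the MSI.
+ */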
+static int iproc_msi_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+ int target_cpu = cpumask_first(mask);
+ int curr_cpu;
+ int ret;
+
+ curr_cpu = hwirq_to_cpu(msi, data->hwirq);
+ if (curr_cpu == target_cpu)
+ ret = IRQ_SET_MASK_OK_DONE;
+ else {
+ /* steer MSI to the target CPU */
+ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+ ret = IRQ_SET_MASK_OK;
+ }
+
+ irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
+
+ return ret;
+}
+
+static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+ dma_addr_t addr;
+
+ addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq << 5;
+}
+
+static struct irq_chip iproc_msi_bottom_irq_chip = {
+ .name = "MSI",
+ .irq_set_affinity = iproc_msi_irq_set_affinity,
+ .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
+};
+
+static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct iproc_msi *msi = domain->host_data;
+ int hwirq, i;
+
+ if (msi->nr_cpus > 1 && nr_irqs > 1)
+ return -EINVAL;
+
+ mutex_lock(&msi->bitmap_lock);
+
+ /*
+	 * Allocate 'nr_irqs' * 'nr_cpus' MSI vectors each time, so that
+	 * each allocated vector has one raw slot per CPU.
+ */
+ hwirq = bitmap_find_free_region(msi->bitmap, msi->nr_msi_vecs,
+ order_base_2(msi->nr_cpus * nr_irqs));
+
+ mutex_unlock(&msi->bitmap_lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &iproc_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void iproc_msi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+ unsigned int hwirq;
+
+ mutex_lock(&msi->bitmap_lock);
+
+ hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
+ bitmap_release_region(msi->bitmap, hwirq,
+ order_base_2(msi->nr_cpus * nr_irqs));
+
+ mutex_unlock(&msi->bitmap_lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = iproc_msi_irq_domain_alloc,
+ .free = iproc_msi_irq_domain_free,
+};
+
+static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
+{
+ u32 __iomem *msg;
+ u32 hwirq;
+ unsigned int offs;
+
+ offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
+ msg = (u32 __iomem *)(msi->eq_cpu + offs);
+ hwirq = readl(msg);
+ hwirq = (hwirq >> 5) + (hwirq & 0x1f);
+
+ /*
+	 * Since multiple hwirqs map to a single MSI vector, derive the
+	 * canonical hwirq (the one at CPU0); it can then be mapped back
+	 * to the virq.
+ */
+ return hwirq_to_canonical_hwirq(msi, hwirq);
+}
+
+static void iproc_msi_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct iproc_msi_grp *grp;
+ struct iproc_msi *msi;
+ u32 eq, head, tail, nr_events;
+ unsigned long hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ grp = irq_desc_get_handler_data(desc);
+ msi = grp->msi;
+ eq = grp->eq;
+
+ /*
+	 * The iProc MSI event queue is tracked by head and tail pointers.
+	 * The head pointer indicates the next entry (MSI data) to be
+	 * consumed by SW and must be updated by SW; the iProc MSI core uses
+	 * the tail pointer as the next data insertion point.
+ *
+ * Entries between head and tail pointers contain valid MSI data. MSI
+ * data is guaranteed to be in the event queue memory before the tail
+ * pointer is updated by the iProc MSI core.
+ */
+ head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
+ eq) & IPROC_MSI_EQ_MASK;
+ do {
+ tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
+ eq) & IPROC_MSI_EQ_MASK;
+
+ /*
+ * Figure out total number of events (MSI data) to be
+ * processed.
+ */
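+		/* e.g. head = 60, tail = 4 wraps: 64 - (60 - 4) = 8 events */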
+ nr_events = (tail < head) ?
+ (EQ_LEN - (head - tail)) : (tail - head);
+ if (!nr_events)
+ break;
+
+ /* process all outstanding events */
+ while (nr_events--) {
+ hwirq = decode_msi_hwirq(msi, eq, head);
+ generic_handle_domain_irq(msi->inner_domain, hwirq);
+
+ head++;
+ head %= EQ_LEN;
+ }
+
+ /*
+ * Now all outstanding events have been processed. Update the
+ * head pointer.
+ */
+ iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);
+
+ /*
+ * Now go read the tail pointer again to see if there are new
+ * outstanding events that came in during the above window.
+ */
+ } while (true);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void iproc_msi_enable(struct iproc_msi *msi)
+{
+ int i, eq;
+ u32 val;
+
+ /* Program memory region for each event queue */
+ for (i = 0; i < msi->nr_eq_region; i++) {
+ dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);
+
+ iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
+ lower_32_bits(addr));
+ iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
+ upper_32_bits(addr));
+ }
+
+ /* Program address region for MSI posted writes */
+ for (i = 0; i < msi->nr_msi_region; i++) {
+ phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);
+
+ iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
+ lower_32_bits(addr));
+ iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
+ upper_32_bits(addr));
+ }
+
+ for (eq = 0; eq < msi->nr_irqs; eq++) {
+ /* Enable MSI event queue */
+ val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
+ IPROC_MSI_EQ_EN;
+ iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
+
+ /*
+ * Some legacy platforms require the MSI interrupt enable
+ * register to be set explicitly.
+ */
+ if (msi->has_inten_reg) {
+ val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
+ val |= BIT(eq);
+ iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
+ }
+ }
+}
+
+static void iproc_msi_disable(struct iproc_msi *msi)
+{
+ u32 eq, val;
+
+ for (eq = 0; eq < msi->nr_irqs; eq++) {
+ if (msi->has_inten_reg) {
+ val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
+ val &= ~BIT(eq);
+ iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
+ }
+
+ val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
+ val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
+ IPROC_MSI_EQ_EN);
+ iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
+ }
+}
+
+static int iproc_msi_alloc_domains(struct device_node *node,
+ struct iproc_msi *msi)
+{
+ msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
+ &msi_domain_ops, msi);
+ if (!msi->inner_domain)
+ return -ENOMEM;
+
+ msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ &iproc_msi_domain_info,
+ msi->inner_domain);
+ if (!msi->msi_domain) {
+ irq_domain_remove(msi->inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void iproc_msi_free_domains(struct iproc_msi *msi)
+{
+ if (msi->msi_domain)
+ irq_domain_remove(msi->msi_domain);
+
+ if (msi->inner_domain)
+ irq_domain_remove(msi->inner_domain);
+}
+
+static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
+{
+ int i;
+
+ for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
+ irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
+ NULL, NULL);
+ }
+}
+
+static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
+{
+ int i, ret;
+ cpumask_var_t mask;
+ struct iproc_pcie *pcie = msi->pcie;
+
+ for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
+ irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
+ iproc_msi_handler,
+ &msi->grps[i]);
+ /* Dedicate GIC interrupt to each CPU core */
+ if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ cpumask_clear(mask);
+ cpumask_set_cpu(cpu, mask);
+ ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
+ if (ret)
+ dev_err(pcie->dev,
+ "failed to set affinity for IRQ%d\n",
+ msi->grps[i].gic_irq);
+ free_cpumask_var(mask);
+ } else {
+ dev_err(pcie->dev, "failed to alloc CPU mask\n");
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ /* Free all configured/unconfigured IRQs */
+ iproc_msi_irq_free(msi, cpu);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
+{
+ struct iproc_msi *msi;
+ int i, ret;
+ unsigned int cpu;
+
+ if (!of_device_is_compatible(node, "brcm,iproc-msi"))
+ return -ENODEV;
+
+ if (!of_property_read_bool(node, "msi-controller"))
+ return -ENODEV;
+
+ if (pcie->msi)
+ return -EBUSY;
+
+ msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
+ if (!msi)
+ return -ENOMEM;
+
+ msi->pcie = pcie;
+ pcie->msi = msi;
+ msi->msi_addr = pcie->base_addr;
+ mutex_init(&msi->bitmap_lock);
+ msi->nr_cpus = num_possible_cpus();
+
+ if (msi->nr_cpus == 1)
+ iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;
+
+ msi->nr_irqs = of_irq_count(node);
+ if (!msi->nr_irqs) {
+ dev_err(pcie->dev, "found no MSI GIC interrupt\n");
+ return -ENODEV;
+ }
+
+ if (msi->nr_irqs > NR_HW_IRQS) {
+ dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
+ msi->nr_irqs);
+ msi->nr_irqs = NR_HW_IRQS;
+ }
+
+ if (msi->nr_irqs < msi->nr_cpus) {
+ dev_err(pcie->dev,
+ "not enough GIC interrupts for MSI affinity\n");
+ return -EINVAL;
+ }
+
+ if (msi->nr_irqs % msi->nr_cpus != 0) {
+ msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
+ dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
+ msi->nr_irqs);
+ }
+
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXB_BCMA:
+ case IPROC_PCIE_PAXB:
+ msi->reg_offsets = iproc_msi_reg_paxb;
+ msi->nr_eq_region = 1;
+ msi->nr_msi_region = 1;
+ break;
+ case IPROC_PCIE_PAXC:
+ msi->reg_offsets = iproc_msi_reg_paxc;
+ msi->nr_eq_region = msi->nr_irqs;
+ msi->nr_msi_region = msi->nr_irqs;
+ break;
+ default:
+ dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
+ return -EINVAL;
+ }
+
+ msi->has_inten_reg = of_property_read_bool(node, "brcm,pcie-msi-inten");
+
+ msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
+ msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs,
+ GFP_KERNEL);
+ if (!msi->bitmap)
+ return -ENOMEM;
+
+ msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
+ GFP_KERNEL);
+ if (!msi->grps)
+ return -ENOMEM;
+
+ for (i = 0; i < msi->nr_irqs; i++) {
+ unsigned int irq = irq_of_parse_and_map(node, i);
+
+ if (!irq) {
+ dev_err(pcie->dev, "unable to parse/map interrupt\n");
+ ret = -ENODEV;
+ goto free_irqs;
+ }
+ msi->grps[i].gic_irq = irq;
+ msi->grps[i].msi = msi;
+ msi->grps[i].eq = i;
+ }
+
+	/* Reserve memory for the event queues and make sure it is zeroed */
+ msi->eq_cpu = dma_alloc_coherent(pcie->dev,
+ msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+ &msi->eq_dma, GFP_KERNEL);
+ if (!msi->eq_cpu) {
+ ret = -ENOMEM;
+ goto free_irqs;
+ }
+
+ ret = iproc_msi_alloc_domains(node, msi);
+ if (ret) {
+ dev_err(pcie->dev, "failed to create MSI domains\n");
+ goto free_eq_dma;
+ }
+
+ for_each_online_cpu(cpu) {
+ ret = iproc_msi_irq_setup(msi, cpu);
+ if (ret)
+ goto free_msi_irq;
+ }
+
+ iproc_msi_enable(msi);
+
+ return 0;
+
+free_msi_irq:
+ for_each_online_cpu(cpu)
+ iproc_msi_irq_free(msi, cpu);
+ iproc_msi_free_domains(msi);
+
+free_eq_dma:
+ dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+ msi->eq_cpu, msi->eq_dma);
+
+free_irqs:
+ for (i = 0; i < msi->nr_irqs; i++) {
+ if (msi->grps[i].gic_irq)
+ irq_dispose_mapping(msi->grps[i].gic_irq);
+ }
+ pcie->msi = NULL;
+ return ret;
+}
+EXPORT_SYMBOL(iproc_msi_init);
+
+void iproc_msi_exit(struct iproc_pcie *pcie)
+{
+ struct iproc_msi *msi = pcie->msi;
+ unsigned int i, cpu;
+
+ if (!msi)
+ return;
+
+ iproc_msi_disable(msi);
+
+ for_each_online_cpu(cpu)
+ iproc_msi_irq_free(msi, cpu);
+
+ iproc_msi_free_domains(msi);
+
+ dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+ msi->eq_cpu, msi->eq_dma);
+
+ for (i = 0; i < msi->nr_irqs; i++) {
+ if (msi->grps[i].gic_irq)
+ irq_dispose_mapping(msi->grps[i].gic_irq);
+ }
+}
+EXPORT_SYMBOL(iproc_msi_exit);
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
new file mode 100644
index 000000000..acdc583d2
--- /dev/null
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+
+#include "../pci.h"
+#include "pcie-iproc.h"
+
+static const struct of_device_id iproc_pcie_of_match_table[] = {
+ {
+ .compatible = "brcm,iproc-pcie",
+ .data = (int *)IPROC_PCIE_PAXB,
+ }, {
+ .compatible = "brcm,iproc-pcie-paxb-v2",
+ .data = (int *)IPROC_PCIE_PAXB_V2,
+ }, {
+ .compatible = "brcm,iproc-pcie-paxc",
+ .data = (int *)IPROC_PCIE_PAXC,
+ }, {
+ .compatible = "brcm,iproc-pcie-paxc-v2",
+ .data = (int *)IPROC_PCIE_PAXC_V2,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
+
+static int iproc_pltfm_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iproc_pcie *pcie;
+ struct device_node *np = dev->of_node;
+ struct resource reg;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+
+ pcie->dev = dev;
+ pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev);
+
+ ret = of_address_to_resource(np, 0, &reg);
+ if (ret < 0) {
+ dev_err(dev, "unable to obtain controller resources\n");
+ return ret;
+ }
+
+ pcie->base = devm_pci_remap_cfgspace(dev, reg.start,
+ resource_size(&reg));
+ if (!pcie->base) {
+ dev_err(dev, "unable to map controller registers\n");
+ return -ENOMEM;
+ }
+ pcie->base_addr = reg.start;
+
+ if (of_property_read_bool(np, "brcm,pcie-ob")) {
+ u32 val;
+
+ ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset",
+ &val);
+ if (ret) {
+ dev_err(dev,
+ "missing brcm,pcie-ob-axi-offset property\n");
+ return ret;
+ }
+ pcie->ob.axi_offset = val;
+ pcie->need_ob_cfg = true;
+ }
+
+ /*
+	 * Not all platforms that use the iProc PCIe core driver are
+	 * described by DT. For the platforms that do require explicit
+	 * inbound mapping configuration, "dma-ranges" will be present in
+	 * the DT node.
+ */
+ pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges");
+
+ /* PHY use is optional */
+ pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
+ /* PAXC doesn't support legacy IRQs, skip mapping */
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXC:
+ case IPROC_PCIE_PAXC_V2:
+ pcie->map_irq = NULL;
+ break;
+ default:
+ break;
+ }
+
+ ret = iproc_pcie_setup(pcie, &bridge->windows);
+ if (ret) {
+ dev_err(dev, "PCIe controller setup failed\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+ return 0;
+}
+
+static void iproc_pltfm_pcie_remove(struct platform_device *pdev)
+{
+ struct iproc_pcie *pcie = platform_get_drvdata(pdev);
+
+ iproc_pcie_remove(pcie);
+}
+
+static void iproc_pltfm_pcie_shutdown(struct platform_device *pdev)
+{
+ struct iproc_pcie *pcie = platform_get_drvdata(pdev);
+
+ iproc_pcie_shutdown(pcie);
+}
+
+static struct platform_driver iproc_pltfm_pcie_driver = {
+ .driver = {
+ .name = "iproc-pcie",
+ .of_match_table = of_match_ptr(iproc_pcie_of_match_table),
+ },
+ .probe = iproc_pltfm_pcie_probe,
+ .remove_new = iproc_pltfm_pcie_remove,
+ .shutdown = iproc_pltfm_pcie_shutdown,
+};
+module_platform_driver(iproc_pltfm_pcie_driver);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iPROC PCIe platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
new file mode 100644
index 000000000..bd1c98b68
--- /dev/null
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -0,0 +1,1598 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2015 Broadcom Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/msi.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mbus.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+
+#include "pcie-iproc.h"
+
+#define EP_PERST_SOURCE_SELECT_SHIFT 2
+#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT)
+#define EP_MODE_SURVIVE_PERST_SHIFT 1
+#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT)
+#define RC_PCIE_RST_OUTPUT_SHIFT 0
+#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT)
+#define PAXC_RESET_MASK 0x7f
+
+#define GIC_V3_CFG_SHIFT 0
+#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT)
+
+#define MSI_ENABLE_CFG_SHIFT 0
+#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT)
+
+#define CFG_IND_ADDR_MASK 0x00001ffc
+
+#define CFG_ADDR_REG_NUM_MASK 0x00000ffc
+#define CFG_ADDR_CFG_TYPE_1 1
+
+#define SYS_RC_INTX_MASK 0xf
+
+#define PCIE_PHYLINKUP_SHIFT 3
+#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT)
+#define PCIE_DL_ACTIVE_SHIFT 2
+#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT)
+
+#define APB_ERR_EN_SHIFT 0
+#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT)
+
+#define CFG_RD_SUCCESS 0
+#define CFG_RD_UR 1
+#define CFG_RD_CRS 2
+#define CFG_RD_CA 3
+#define CFG_RETRY_STATUS 0xffff0001
+#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
+
+/* derive the enum index of the outbound/inbound mapping registers */
+#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2)
+
+/*
+ * Maximum number of outbound mapping window sizes that can be supported by any
+ * OARR/OMAP mapping pair
+ */
+#define MAX_NUM_OB_WINDOW_SIZES 4
+
+#define OARR_VALID_SHIFT 0
+#define OARR_VALID BIT(OARR_VALID_SHIFT)
+#define OARR_SIZE_CFG_SHIFT 1
+
+/*
+ * Maximum number of inbound mapping region sizes that can be supported by an
+ * IARR
+ */
+#define MAX_NUM_IB_REGION_SIZES 9
+
+#define IMAP_VALID_SHIFT 0
+#define IMAP_VALID BIT(IMAP_VALID_SHIFT)
+
+#define IPROC_PCI_PM_CAP 0x48
+#define IPROC_PCI_PM_CAP_MASK 0xffff
+#define IPROC_PCI_EXP_CAP 0xac
+
+#define IPROC_PCIE_REG_INVALID 0xffff
+
+/**
+ * struct iproc_pcie_ob_map - iProc PCIe outbound mapping controller-specific
+ * parameters
+ * @window_sizes: list of supported outbound mapping window sizes in MB
+ * @nr_sizes: number of supported outbound mapping window sizes
+ */
+struct iproc_pcie_ob_map {
+ resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
+ unsigned int nr_sizes;
+};
+
+static const struct iproc_pcie_ob_map paxb_ob_map[] = {
+ {
+ /* OARR0/OMAP0 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR1/OMAP1 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+};
+
+static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
+ {
+ /* OARR0/OMAP0 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR1/OMAP1 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR2/OMAP2 */
+ .window_sizes = { 128, 256, 512, 1024 },
+ .nr_sizes = 4,
+ },
+ {
+ /* OARR3/OMAP3 */
+ .window_sizes = { 128, 256, 512, 1024 },
+ .nr_sizes = 4,
+ },
+};
+
+/**
+ * enum iproc_pcie_ib_map_type - iProc PCIe inbound mapping type
+ * @IPROC_PCIE_IB_MAP_MEM: DDR memory
+ * @IPROC_PCIE_IB_MAP_IO: device I/O memory
+ * @IPROC_PCIE_IB_MAP_INVALID: invalid or unused
+ */
+enum iproc_pcie_ib_map_type {
+ IPROC_PCIE_IB_MAP_MEM = 0,
+ IPROC_PCIE_IB_MAP_IO,
+ IPROC_PCIE_IB_MAP_INVALID
+};
+
+/**
+ * struct iproc_pcie_ib_map - iProc PCIe inbound mapping controller-specific
+ * parameters
+ * @type: inbound mapping region type
+ * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
+ * SZ_1G
+ * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
+ * GB, depending on the size unit
+ * @nr_sizes: number of supported inbound mapping region sizes
+ * @nr_windows: number of supported inbound mapping windows for the region
+ * @imap_addr_offset: register offset between the upper and lower 32-bit
+ * IMAP address registers
+ * @imap_window_offset: register offset between each IMAP window
+ */
+struct iproc_pcie_ib_map {
+ enum iproc_pcie_ib_map_type type;
+ unsigned int size_unit;
+ resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
+ unsigned int nr_sizes;
+ unsigned int nr_windows;
+ u16 imap_addr_offset;
+ u16 imap_window_offset;
+};
+
+static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
+ {
+ /* IARR0/IMAP0 */
+ .type = IPROC_PCIE_IB_MAP_IO,
+ .size_unit = SZ_1K,
+ .region_sizes = { 32 },
+ .nr_sizes = 1,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x40,
+ .imap_window_offset = 0x4,
+ },
+ {
+ /* IARR1/IMAP1 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1M,
+ .region_sizes = { 8 },
+ .nr_sizes = 1,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+
+ },
+ {
+ /* IARR2/IMAP2 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1M,
+ .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
+ 16384 },
+ .nr_sizes = 9,
+ .nr_windows = 1,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+ {
+ /* IARR3/IMAP3 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1G,
+ .region_sizes = { 1, 2, 4, 8, 16, 32 },
+ .nr_sizes = 6,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+ {
+ /* IARR4/IMAP4 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1G,
+ .region_sizes = { 32, 64, 128, 256, 512 },
+ .nr_sizes = 5,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+};
+
+/*
+ * iProc PCIe host registers
+ */
+enum iproc_pcie_reg {
+ /* clock/reset signal control */
+ IPROC_PCIE_CLK_CTRL = 0,
+
+ /*
+ * To allow MSI to be steered to an external MSI controller (e.g., ARM
+ * GICv3 ITS)
+ */
+ IPROC_PCIE_MSI_GIC_MODE,
+
+ /*
+ * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
+ * window where the MSI posted writes are written, for the writes to be
+ * interpreted as MSI writes.
+ */
+ IPROC_PCIE_MSI_BASE_ADDR,
+ IPROC_PCIE_MSI_WINDOW_SIZE,
+
+ /*
+ * To hold the address of the register where the MSI writes are
+ * programmed. When ARM GICv3 ITS is used, this should be programmed
+ * with the address of the GITS_TRANSLATER register.
+ */
+ IPROC_PCIE_MSI_ADDR_LO,
+ IPROC_PCIE_MSI_ADDR_HI,
+
+ /* enable MSI */
+ IPROC_PCIE_MSI_EN_CFG,
+
+ /* allow access to root complex configuration space */
+ IPROC_PCIE_CFG_IND_ADDR,
+ IPROC_PCIE_CFG_IND_DATA,
+
+ /* allow access to device configuration space */
+ IPROC_PCIE_CFG_ADDR,
+ IPROC_PCIE_CFG_DATA,
+
+ /* enable INTx */
+ IPROC_PCIE_INTX_EN,
+
+ /* outbound address mapping */
+ IPROC_PCIE_OARR0,
+ IPROC_PCIE_OMAP0,
+ IPROC_PCIE_OARR1,
+ IPROC_PCIE_OMAP1,
+ IPROC_PCIE_OARR2,
+ IPROC_PCIE_OMAP2,
+ IPROC_PCIE_OARR3,
+ IPROC_PCIE_OMAP3,
+
+ /* inbound address mapping */
+ IPROC_PCIE_IARR0,
+ IPROC_PCIE_IMAP0,
+ IPROC_PCIE_IARR1,
+ IPROC_PCIE_IMAP1,
+ IPROC_PCIE_IARR2,
+ IPROC_PCIE_IMAP2,
+ IPROC_PCIE_IARR3,
+ IPROC_PCIE_IMAP3,
+ IPROC_PCIE_IARR4,
+ IPROC_PCIE_IMAP4,
+
+ /* config read status */
+ IPROC_PCIE_CFG_RD_STATUS,
+
+ /* link status */
+ IPROC_PCIE_LINK_STATUS,
+
+ /* enable APB error for unsupported requests */
+ IPROC_PCIE_APB_ERR_EN,
+
+ /* total number of core registers */
+ IPROC_PCIE_MAX_NUM_REG,
+};
+
+/* iProc PCIe PAXB BCMA registers */
+static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+};
+
+/* iProc PCIe PAXB registers */
+static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_OARR0] = 0xd20,
+ [IPROC_PCIE_OMAP0] = 0xd40,
+ [IPROC_PCIE_OARR1] = 0xd28,
+ [IPROC_PCIE_OMAP1] = 0xd48,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_APB_ERR_EN] = 0xf40,
+};
+
+/* iProc PCIe PAXB v2 registers */
+static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_OARR0] = 0xd20,
+ [IPROC_PCIE_OMAP0] = 0xd40,
+ [IPROC_PCIE_OARR1] = 0xd28,
+ [IPROC_PCIE_OMAP1] = 0xd48,
+ [IPROC_PCIE_OARR2] = 0xd60,
+ [IPROC_PCIE_OMAP2] = 0xd68,
+ [IPROC_PCIE_OARR3] = 0xdf0,
+ [IPROC_PCIE_OMAP3] = 0xdf8,
+ [IPROC_PCIE_IARR0] = 0xd00,
+ [IPROC_PCIE_IMAP0] = 0xc00,
+ [IPROC_PCIE_IARR1] = 0xd08,
+ [IPROC_PCIE_IMAP1] = 0xd70,
+ [IPROC_PCIE_IARR2] = 0xd10,
+ [IPROC_PCIE_IMAP2] = 0xcc0,
+ [IPROC_PCIE_IARR3] = 0xe00,
+ [IPROC_PCIE_IMAP3] = 0xe08,
+ [IPROC_PCIE_IARR4] = 0xe68,
+ [IPROC_PCIE_IMAP4] = 0xe70,
+ [IPROC_PCIE_CFG_RD_STATUS] = 0xee0,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_APB_ERR_EN] = 0xf40,
+};
+
+/* iProc PCIe PAXC v1 registers */
+static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+};
+
+/* iProc PCIe PAXC v2 registers */
+static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
+ [IPROC_PCIE_MSI_GIC_MODE] = 0x050,
+ [IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
+ [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
+ [IPROC_PCIE_MSI_ADDR_LO] = 0x07c,
+ [IPROC_PCIE_MSI_ADDR_HI] = 0x080,
+ [IPROC_PCIE_MSI_EN_CFG] = 0x09c,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+};
+
+/*
+ * List of device IDs of controllers that have a corrupted capability list
+ * and require a SW fixup
+ */
+static const u16 iproc_pcie_corrupt_cap_did[] = {
+ 0x16cd,
+ 0x16f0,
+ 0xd802,
+ 0xd804
+};
+
+static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
+{
+ struct iproc_pcie *pcie = bus->sysdata;
+ return pcie;
+}
+
+static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset)
+{
+ return reg_offset == IPROC_PCIE_REG_INVALID;
+}
+
+static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
+ enum iproc_pcie_reg reg)
+{
+ return pcie->reg_offsets[reg];
+}
+
+static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie,
+ enum iproc_pcie_reg reg)
+{
+ u16 offset = iproc_pcie_reg_offset(pcie, reg);
+
+ if (iproc_pcie_reg_is_invalid(offset))
+ return 0;
+
+ return readl(pcie->base + offset);
+}
+
+static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
+ enum iproc_pcie_reg reg, u32 val)
+{
+ u16 offset = iproc_pcie_reg_offset(pcie, reg);
+
+ if (iproc_pcie_reg_is_invalid(offset))
+ return;
+
+ writel(val, pcie->base + offset);
+}
+
+/*
+ * APB error forwarding can be disabled while accessing the configuration
+ * registers of the endpoint device, to prevent unsupported requests
+ * (typically seen during enumeration with multi-function devices) from
+ * triggering a system exception.
+ */
+static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
+ bool disable)
+{
+ struct iproc_pcie *pcie = iproc_data(bus);
+ u32 val;
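+ /*
+ * Index 0 (IPROC_PCIE_CLK_CTRL) needs special handling: a zero in the
+ * register tables means both "offset 0x000" and "register not
+ * present". PAXC v2 has no clock/reset control register, so force its
+ * entry to invalid.
+ */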
+
+ if (bus->number && pcie->has_apb_err_disable) {
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
+ if (disable)
+ val &= ~APB_ERR_EN;
+ else
+ val |= APB_ERR_EN;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
+ }
+}
+
+static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
+ unsigned int busno,
+ unsigned int devfn,
+ int where)
+{
+ u16 offset;
+ u32 val;
+
+ /* EP device access: ECAM-style address with a Type 1 config cycle */
+ val = ALIGN_DOWN(PCIE_ECAM_OFFSET(busno, devfn, where), 4) |
+ CFG_ADDR_CFG_TYPE_1;
+
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
+ offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
+
+ if (iproc_pcie_reg_is_invalid(offset))
+ return NULL;
+
+ return (pcie->base + offset);
+}
+
+static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
+ void __iomem *cfg_data_p)
+{
+ int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
+ unsigned int data;
+ u32 status;
+
+ /*
+ * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
+ * affects config reads of the Vendor ID. For config writes or any
+ * other config reads, the Root may automatically reissue the
+ * configuration request again as a new request.
+ *
+ * For config reads, this hardware returns CFG_RETRY_STATUS data
+ * when it receives a CRS completion, regardless of the address of
+ * the read or the CRS Software Visibility Enable bit. As a
+ * partial workaround for this, we retry in software any read that
+ * returns CFG_RETRY_STATUS.
+ *
+ * Note that a non-Vendor ID config register may have a value of
+ * CFG_RETRY_STATUS. If we read that, we can't distinguish it from
+ * a CRS completion, so we will incorrectly retry the read and
+ * eventually return the wrong data (0xffffffff).
+ */
+ data = readl(cfg_data_p);
+ while (data == CFG_RETRY_STATUS && timeout--) {
+ /*
+ * CRS state is set in CFG_RD status register
+ * This will handle the case where CFG_RETRY_STATUS is
+ * valid config data.
+ */
+ status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
+ if (status != CFG_RD_CRS)
+ return data;
+
+ udelay(1);
+ data = readl(cfg_data_p);
+ }
+
+ if (data == CFG_RETRY_STATUS)
+ data = 0xffffffff;
+
+ return data;
+}
+
+static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
+{
+ u32 i, dev_id;
+
+ switch (where & ~0x3) {
+ case PCI_VENDOR_ID:
+ dev_id = *val >> 16;
+
+ /*
+ * Activate fixup for those controllers that have corrupted
+ * capability list registers
+ */
+ for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
+ if (dev_id == iproc_pcie_corrupt_cap_did[i])
+ pcie->fix_paxc_cap = true;
+ break;
+
+ case IPROC_PCI_PM_CAP:
+ if (pcie->fix_paxc_cap) {
+ /* advertise PM, force next capability to PCIe */
+ *val &= ~IPROC_PCI_PM_CAP_MASK;
+ *val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
+ }
+ break;
+
+ case IPROC_PCI_EXP_CAP:
+ if (pcie->fix_paxc_cap) {
+ /* advertise root port, version 2, terminate here */
+ *val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
+ PCI_CAP_ID_EXP;
+ }
+ break;
+
+ case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
+ /* Don't advertise CRS SV support */
+ *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct iproc_pcie *pcie = iproc_data(bus);
+ unsigned int busno = bus->number;
+ void __iomem *cfg_data_p;
+ unsigned int data;
+ int ret;
+
+ /* root complex access */
+ if (busno == 0) {
+ ret = pci_generic_config_read32(bus, devfn, where, size, val);
+ if (ret == PCIBIOS_SUCCESSFUL)
+ iproc_pcie_fix_cap(pcie, where, val);
+
+ return ret;
+ }
+
+ cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where);
+
+ if (!cfg_data_p)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ data = iproc_pcie_cfg_retry(pcie, cfg_data_p);
+
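+ /* extract the requested byte(s) from the 32-bit word just read */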
+ *val = data;
+ if (size <= 2)
+ *val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+ /*
+ * For PAXC and PAXCv2, the total number of PFs that one can enumerate
+ * depends on the firmware configuration. Unfortunately, due to an ASIC
+ * bug, unconfigured PFs cannot be properly hidden from the root
+ * complex. As a result, write accesses to these PFs will cause a bus
+ * lockup on the embedded processor.
+ *
+ * Since all unconfigured PFs are left with an incorrect, stale device
+ * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those
+ * accesses early here and reject them all.
+ */
+#define DEVICE_ID_MASK 0xffff0000
+#define DEVICE_ID_SHIFT 16
+ if (pcie->rej_unconfig_pf &&
+ (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
+ if ((*val & DEVICE_ID_MASK) ==
+ (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Note that accesses to the configuration registers are protected at a
+ * higher layer by 'pci_lock' in drivers/pci/access.c
+ */
+static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
+ int busno, unsigned int devfn,
+ int where)
+{
+ u16 offset;
+
+ /* root complex access */
+ if (busno == 0) {
+ if (PCIE_ECAM_DEVFN(devfn) > 0)
+ return NULL;
+
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
+ where & CFG_IND_ADDR_MASK);
+ offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
+ if (iproc_pcie_reg_is_invalid(offset))
+ return NULL;
+ else
+ return (pcie->base + offset);
+ }
+
+ return iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where);
+}
+
+static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn,
+ where);
+}
+
+static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
+ unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ void __iomem *addr;
+
+ addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *val = readl(addr);
+
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie,
+ unsigned int devfn, int where,
+ int size, u32 val)
+{
+ void __iomem *addr;
+ u32 mask, tmp;
+
+ addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 4) {
+ writel(val, addr);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
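+ /* sub-word write: read-modify-write the enclosing 32-bit word */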
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+ tmp = readl(addr) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ int ret;
+ struct iproc_pcie *pcie = iproc_data(bus);
+
+ iproc_pcie_apb_err_disable(bus, true);
+ if (pcie->iproc_cfg_read)
+ ret = iproc_pcie_config_read(bus, devfn, where, size, val);
+ else
+ ret = pci_generic_config_read32(bus, devfn, where, size, val);
+ iproc_pcie_apb_err_disable(bus, false);
+
+ return ret;
+}
+
+static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ int ret;
+
+ iproc_pcie_apb_err_disable(bus, true);
+ ret = pci_generic_config_write32(bus, devfn, where, size, val);
+ iproc_pcie_apb_err_disable(bus, false);
+
+ return ret;
+}
+
+static struct pci_ops iproc_pcie_ops = {
+ .map_bus = iproc_pcie_bus_map_cfg_bus,
+ .read = iproc_pcie_config_read32,
+ .write = iproc_pcie_config_write32,
+};
+
+static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert)
+{
+ u32 val;
+
+ /*
+ * PAXC and the internal emulated endpoint device downstream should not
+ * be reset. If firmware has been loaded on the endpoint device at an
+ * earlier boot stage, reset here causes issues.
+ */
+ if (pcie->ep_is_internal)
+ return;
+
+ if (assert) {
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
+ val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
+ ~RC_PCIE_RST_OUTPUT;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
+ udelay(250);
+ } else {
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
+ val |= RC_PCIE_RST_OUTPUT;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
+ msleep(100);
+ }
+}
+
+int iproc_pcie_shutdown(struct iproc_pcie *pcie)
+{
+ iproc_pcie_perst_ctrl(pcie, true);
+ msleep(500);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iproc_pcie_shutdown);
+
+static int iproc_pcie_check_link(struct iproc_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ u32 hdr_type, link_ctrl, link_status, class, val;
+ bool link_is_active = false;
+
+ /*
+ * PAXC connects to emulated endpoint devices directly and does not
+ * have a Serdes. Therefore skip the link detection logic here.
+ */
+ if (pcie->ep_is_internal)
+ return 0;
+
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
+ if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
+ dev_err(dev, "PHY or data link is INACTIVE!\n");
+ return -ENODEV;
+ }
+
+ /* make sure we are not in EP mode */
+ iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
+ if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
+ dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
+ return -EFAULT;
+ }
+
+ /* force class to PCI_CLASS_BRIDGE_PCI_NORMAL (0x060400) */
+#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
+#define PCI_BRIDGE_CTRL_REG_CLASS_MASK 0xffffff
+ iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
+ 4, &class);
+ class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK;
+ class |= PCI_CLASS_BRIDGE_PCI_NORMAL;
+ iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
+ 4, class);
+
+ /* check link status to see if link is active */
+ iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
+ 2, &link_status);
+ if (link_status & PCI_EXP_LNKSTA_NLW)
+ link_is_active = true;
+
+ if (!link_is_active) {
+ /* try GEN 1 link speed */
+#define PCI_TARGET_LINK_SPEED_MASK 0xf
+#define PCI_TARGET_LINK_SPEED_GEN2 0x2
+#define PCI_TARGET_LINK_SPEED_GEN1 0x1
+ iproc_pci_raw_config_read32(pcie, 0,
+ IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
+ 4, &link_ctrl);
+ if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
+ PCI_TARGET_LINK_SPEED_GEN2) {
+ link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
+ link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
+ iproc_pci_raw_config_write32(pcie, 0,
+ IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
+ 4, link_ctrl);
+ msleep(100);
+
+ iproc_pci_raw_config_read32(pcie, 0,
+ IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
+ 2, &link_status);
+ if (link_status & PCI_EXP_LNKSTA_NLW)
+ link_is_active = true;
+ }
+ }
+
+ dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
+
+ return link_is_active ? 0 : -ENODEV;
+}
+
+static void iproc_pcie_enable(struct iproc_pcie *pcie)
+{
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
+}
+
+static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
+ int window_idx)
+{
+ u32 val;
+
+ val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));
+
+ return !!(val & OARR_VALID);
+}
+
+static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
+ int size_idx, u64 axi_addr, u64 pci_addr)
+{
+ struct device *dev = pcie->dev;
+ u16 oarr_offset, omap_offset;
+
+ /*
+ * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
+ * on window index.
+ */
+ oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
+ window_idx));
+ omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
+ window_idx));
+ if (iproc_pcie_reg_is_invalid(oarr_offset) ||
+ iproc_pcie_reg_is_invalid(omap_offset))
+ return -EINVAL;
+
+ /*
+ * Program the OARR registers. The upper 32-bit OARR register is
+ * always right after the lower 32-bit OARR register.
+ */
+ writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
+ OARR_VALID, pcie->base + oarr_offset);
+ writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
+
+ /* now program the OMAP registers */
+ writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
+ writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
+
+ dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
+ window_idx, oarr_offset, &axi_addr, &pci_addr);
+ dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n",
+ readl(pcie->base + oarr_offset),
+ readl(pcie->base + oarr_offset + 4));
+ dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n",
+ readl(pcie->base + omap_offset),
+ readl(pcie->base + omap_offset + 4));
+
+ return 0;
+}
+
+/*
+ * Some iProc SoCs require the SW to configure the outbound address mapping
+ *
+ * Outbound address translation:
+ *
+ * iproc_pcie_address = axi_address - axi_offset
+ * OARR = iproc_pcie_address
+ * OMAP = pci_addr
+ *
+ * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
+ */
+static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
+ u64 pci_addr, resource_size_t size)
+{
+ struct iproc_pcie_ob *ob = &pcie->ob;
+ struct device *dev = pcie->dev;
+ int ret = -EINVAL, window_idx, size_idx;
+
+ if (axi_addr < ob->axi_offset) {
+ dev_err(dev, "axi address %pap less than offset %pap\n",
+ &axi_addr, &ob->axi_offset);
+ return -EINVAL;
+ }
+
+ /*
+ * Translate the AXI address to the internal address used by the iProc
+ * PCIe core before programming the OARR
+ */
+ axi_addr -= ob->axi_offset;
+
+ /* iterate through all OARR/OMAP mapping windows */
+ for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
+ const struct iproc_pcie_ob_map *ob_map =
+ &pcie->ob_map[window_idx];
+
+ /*
+ * If current outbound window is already in use, move on to the
+ * next one.
+ */
+ if (iproc_pcie_ob_is_valid(pcie, window_idx))
+ continue;
+
+ /*
+ * Iterate through all supported window sizes within the
+ * OARR/OMAP pair to find a match. Go through the window sizes
+ * in a descending order.
+ */
+ for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
+ size_idx--) {
+ resource_size_t window_size =
+ ob_map->window_sizes[size_idx] * SZ_1M;
+
+ /*
+ * If the remaining size is smaller than the current window
+ * size, keep looking unless this is the last window with the
+ * minimum window size (size index zero). In that case,
+ * compromise by mapping the remainder with the minimum
+ * supported window size.
+ */
+ if (size < window_size) {
+ if (size_idx > 0 || window_idx > 0)
+ continue;
+
+ /*
+ * Corner case: the last window with the minimum supported
+ * window size. Round the addresses down and map a full
+ * minimum-sized window.
+ */
+ axi_addr = ALIGN_DOWN(axi_addr, window_size);
+ pci_addr = ALIGN_DOWN(pci_addr, window_size);
+ size = window_size;
+ }
+
+ if (!IS_ALIGNED(axi_addr, window_size) ||
+ !IS_ALIGNED(pci_addr, window_size)) {
+ dev_err(dev,
+ "axi %pap or pci %pap not aligned\n",
+ &axi_addr, &pci_addr);
+ return -EINVAL;
+ }
+
+ /*
+ * Match found! Program both OARR and OMAP and mark
+ * them as a valid entry.
+ */
+ ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
+ axi_addr, pci_addr);
+ if (ret)
+ goto err_ob;
+
+ size -= window_size;
+ if (size == 0)
+ return 0;
+
+ /*
+ * If we are here, we are done with the current window,
+ * but not yet finished all mappings. Need to move on
+ * to the next window.
+ */
+ axi_addr += window_size;
+ pci_addr += window_size;
+ break;
+ }
+ }
+
+err_ob:
+ dev_err(dev, "unable to configure outbound mapping\n");
+ dev_err(dev,
+ "axi %pap, axi offset %pap, pci %pap, res size %pap\n",
+ &axi_addr, &ob->axi_offset, &pci_addr, &size);
+
+ return ret;
+}
+
+static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
+ struct list_head *resources)
+{
+ struct device *dev = pcie->dev;
+ struct resource_entry *window;
+ int ret;
+
+ resource_list_for_each_entry(window, resources) {
+ struct resource *res = window->res;
+ u64 res_type = resource_type(res);
+
+ switch (res_type) {
+ case IORESOURCE_IO:
+ case IORESOURCE_BUS:
+ break;
+ case IORESOURCE_MEM:
+ ret = iproc_pcie_setup_ob(pcie, res->start,
+ res->start - window->offset,
+ resource_size(res));
+ if (ret)
+ return ret;
+ break;
+ default:
+ dev_err(dev, "invalid resource %pR\n", res);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
+ int region_idx)
+{
+ const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+ u32 val;
+
+ val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));
+
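+ /* the low nr_sizes bits of IARR form a one-hot size/enable field */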
+ return !!(val & (BIT(ib_map->nr_sizes) - 1));
+}
+
+static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
+ enum iproc_pcie_ib_map_type type)
+{
+ return ib_map->type == type;
+}
+
+static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
+ int size_idx, int nr_windows, u64 axi_addr,
+ u64 pci_addr, resource_size_t size)
+{
+ struct device *dev = pcie->dev;
+ const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+ u16 iarr_offset, imap_offset;
+ u32 val;
+ int window_idx;
+
+ iarr_offset = iproc_pcie_reg_offset(pcie,
+ MAP_REG(IPROC_PCIE_IARR0, region_idx));
+ imap_offset = iproc_pcie_reg_offset(pcie,
+ MAP_REG(IPROC_PCIE_IMAP0, region_idx));
+ if (iproc_pcie_reg_is_invalid(iarr_offset) ||
+ iproc_pcie_reg_is_invalid(imap_offset))
+ return -EINVAL;
+
+ dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
+ region_idx, iarr_offset, &axi_addr, &pci_addr);
+
+ /*
+ * Program the IARR registers. The upper 32-bit IARR register is
+ * always right after the lower 32-bit IARR register.
+ */
+ writel(lower_32_bits(pci_addr) | BIT(size_idx),
+ pcie->base + iarr_offset);
+ writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
+
+ dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n",
+ readl(pcie->base + iarr_offset),
+ readl(pcie->base + iarr_offset + 4));
+
+ /*
+ * Now program the IMAP registers. Each IARR region may have one or
+ * more IMAP windows.
+ */
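+ /* the mapped size is split evenly across the IMAP windows */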
+ size >>= ilog2(nr_windows);
+ for (window_idx = 0; window_idx < nr_windows; window_idx++) {
+ val = readl(pcie->base + imap_offset);
+ val |= lower_32_bits(axi_addr) | IMAP_VALID;
+ writel(val, pcie->base + imap_offset);
+ writel(upper_32_bits(axi_addr),
+ pcie->base + imap_offset + ib_map->imap_addr_offset);
+
+ dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
+ window_idx, readl(pcie->base + imap_offset),
+ readl(pcie->base + imap_offset +
+ ib_map->imap_addr_offset));
+
+ imap_offset += ib_map->imap_window_offset;
+ axi_addr += size;
+ }
+
+ return 0;
+}
+
+static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
+ struct resource_entry *entry,
+ enum iproc_pcie_ib_map_type type)
+{
+ struct device *dev = pcie->dev;
+ struct iproc_pcie_ib *ib = &pcie->ib;
+ int ret;
+ unsigned int region_idx, size_idx;
+ u64 axi_addr = entry->res->start;
+ u64 pci_addr = entry->res->start - entry->offset;
+ resource_size_t size = resource_size(entry->res);
+
+ /* iterate through all IARR mapping regions */
+ for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
+ const struct iproc_pcie_ib_map *ib_map =
+ &pcie->ib_map[region_idx];
+
+ /*
+ * If current inbound region is already in use or not a
+ * compatible type, move on to the next.
+ */
+ if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
+ !iproc_pcie_ib_check_type(ib_map, type))
+ continue;
+
+ /* iterate through all supported region sizes to find a match */
+ for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
+ resource_size_t region_size =
+ ib_map->region_sizes[size_idx] * ib_map->size_unit;
+
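+ /* inbound regions must match the requested size exactly */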
+ if (size != region_size)
+ continue;
+
+ if (!IS_ALIGNED(axi_addr, region_size) ||
+ !IS_ALIGNED(pci_addr, region_size)) {
+ dev_err(dev,
+ "axi %pap or pci %pap not aligned\n",
+ &axi_addr, &pci_addr);
+ return -EINVAL;
+ }
+
+ /* Match found! Program IARR and all IMAP windows. */
+ ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
+ ib_map->nr_windows, axi_addr,
+ pci_addr, size);
+ if (ret)
+ goto err_ib;
+ else
+ return 0;
+
+ }
+ }
+ ret = -EINVAL;
+
+err_ib:
+ dev_err(dev, "unable to configure inbound mapping\n");
+ dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
+ &axi_addr, &pci_addr, &size);
+
+ return ret;
+}
+
+static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct resource_entry *entry;
+ int ret = 0;
+
+ resource_list_for_each_entry(entry, &host->dma_ranges) {
+ /* Each range entry corresponds to an inbound mapping region */
+ ret = iproc_pcie_setup_ib(pcie, entry, IPROC_PCIE_IB_MAP_MEM);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void iproc_pcie_invalidate_mapping(struct iproc_pcie *pcie)
+{
+ struct iproc_pcie_ib *ib = &pcie->ib;
+ struct iproc_pcie_ob *ob = &pcie->ob;
+ int idx;
+
+ if (pcie->ep_is_internal)
+ return;
+
+ if (pcie->need_ob_cfg) {
+ /* iterate through all OARR mapping regions */
+ for (idx = ob->nr_windows - 1; idx >= 0; idx--) {
+ iproc_pcie_write_reg(pcie,
+ MAP_REG(IPROC_PCIE_OARR0, idx), 0);
+ }
+ }
+
+ if (pcie->need_ib_cfg) {
+ /* iterate through all IARR mapping regions */
+ for (idx = 0; idx < ib->nr_regions; idx++) {
+ iproc_pcie_write_reg(pcie,
+ MAP_REG(IPROC_PCIE_IARR0, idx), 0);
+ }
+ }
+}
+
+static int iproc_pcie_get_msi(struct iproc_pcie *pcie,
+ struct device_node *msi_node,
+ u64 *msi_addr)
+{
+ struct device *dev = pcie->dev;
+ int ret;
+ struct resource res;
+
+ /*
+ * Check if 'msi-map' points to ARM GICv3 ITS, which is the only
+ * supported external MSI controller that requires steering.
+ */
+ if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
+ dev_err(dev, "unable to find compatible MSI controller\n");
+ return -ENODEV;
+ }
+
+ /* derive GITS_TRANSLATER address from GICv3 */
+ ret = of_address_to_resource(msi_node, 0, &res);
+ if (ret < 0) {
+ dev_err(dev, "unable to obtain MSI controller resources\n");
+ return ret;
+ }
+
+ *msi_addr = res.start + GITS_TRANSLATER;
+ return 0;
+}
+
+static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+{
+ int ret;
+ struct resource_entry entry;
+
+ memset(&entry, 0, sizeof(entry));
+ entry.res = &entry.__res;
+
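+ /* map a 32K-aligned inbound window covering the MSI doorbell address */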
+ msi_addr &= ~(SZ_32K - 1);
+ entry.res->start = msi_addr;
+ entry.res->end = msi_addr + SZ_32K - 1;
+
+ ret = iproc_pcie_setup_ib(pcie, &entry, IPROC_PCIE_IB_MAP_IO);
+ return ret;
+}
+
+static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr,
+ bool enable)
+{
+ u32 val;
+
+ if (!enable) {
+ /*
+ * Disable PAXC MSI steering. All write transfers will be
+ * treated as non-MSI transfers
+ */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
+ val &= ~MSI_ENABLE_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
+ return;
+ }
+
+ /*
+ * Program bits [43:13] of address of GITS_TRANSLATER register into
+ * bits [30:0] of the MSI base address register. In fact, in all iProc
+ * based SoCs, all I/O register bases are well below the 32-bit
+ * boundary, so we can safely assume bits [43:32] are always zeros.
+ */
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
+ (u32)(msi_addr >> 13));
+
+ /* use a default 8K window size */
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);
+
+ /* steering MSI to GICv3 ITS */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
+ val |= GIC_V3_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);
+
+ /*
+ * Program bits [43:2] of address of GITS_TRANSLATER register into the
+ * iProc MSI address registers.
+ */
+ msi_addr >>= 2;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
+ upper_32_bits(msi_addr));
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
+ lower_32_bits(msi_addr));
+
+ /* enable MSI */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
+ val |= MSI_ENABLE_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
+}
+
+static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
+ struct device_node *msi_node)
+{
+ struct device *dev = pcie->dev;
+ int ret;
+ u64 msi_addr;
+
+ ret = iproc_pcie_get_msi(pcie, msi_node, &msi_addr);
+ if (ret < 0) {
+ dev_err(dev, "msi steering failed\n");
+ return ret;
+ }
+
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXB_V2:
+ ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
+ if (ret)
+ return ret;
+ break;
+ case IPROC_PCIE_PAXC_V2:
+ iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
+{
+ struct device_node *msi_node;
+ int ret;
+
+ /*
+ * Either the "msi-parent" or the "msi-map" phandle needs to exist
+ * for us to obtain the MSI node.
+ */
+
+ msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
+ if (!msi_node) {
+ const __be32 *msi_map = NULL;
+ int len;
+ u32 phandle;
+
+ msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
+ if (!msi_map)
+ return -ENODEV;
+
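+ /*
+ * msi-map = <rid-base msi-controller-phandle msi-base length>;
+ * the MSI controller phandle is the second cell.
+ */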
+ phandle = be32_to_cpup(msi_map + 1);
+ msi_node = of_find_node_by_phandle(phandle);
+ if (!msi_node)
+ return -ENODEV;
+ }
+
+ /*
+ * Certain revisions of the iProc PCIe controller require additional
+ * configuration to steer the MSI writes towards an external MSI
+ * controller.
+ */
+ if (pcie->need_msi_steer) {
+ ret = iproc_pcie_msi_steer(pcie, msi_node);
+ if (ret)
+ goto out_put_node;
+ }
+
+ /*
+ * If another MSI controller is being used, the call below should fail
+ * but that is okay
+ */
+ ret = iproc_msi_init(pcie, msi_node);
+
+out_put_node:
+ of_node_put(msi_node);
+ return ret;
+}
+
+static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
+{
+ iproc_msi_exit(pcie);
+}
+
+static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int reg_idx;
+ const u16 *regs;
+
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXB_BCMA:
+ regs = iproc_pcie_reg_paxb_bcma;
+ break;
+ case IPROC_PCIE_PAXB:
+ regs = iproc_pcie_reg_paxb;
+ pcie->has_apb_err_disable = true;
+ if (pcie->need_ob_cfg) {
+ pcie->ob_map = paxb_ob_map;
+ pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
+ }
+ break;
+ case IPROC_PCIE_PAXB_V2:
+ regs = iproc_pcie_reg_paxb_v2;
+ pcie->iproc_cfg_read = true;
+ pcie->has_apb_err_disable = true;
+ if (pcie->need_ob_cfg) {
+ pcie->ob_map = paxb_v2_ob_map;
+ pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
+ }
+ pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
+ pcie->ib_map = paxb_v2_ib_map;
+ pcie->need_msi_steer = true;
+ dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n",
+ CFG_RETRY_STATUS);
+ break;
+ case IPROC_PCIE_PAXC:
+ regs = iproc_pcie_reg_paxc;
+ pcie->ep_is_internal = true;
+ pcie->iproc_cfg_read = true;
+ pcie->rej_unconfig_pf = true;
+ break;
+ case IPROC_PCIE_PAXC_V2:
+ regs = iproc_pcie_reg_paxc_v2;
+ pcie->ep_is_internal = true;
+ pcie->iproc_cfg_read = true;
+ pcie->rej_unconfig_pf = true;
+ pcie->need_msi_steer = true;
+ break;
+ default:
+ dev_err(dev, "incompatible iProc PCIe interface\n");
+ return -EINVAL;
+ }
+
+ pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
+ sizeof(*pcie->reg_offsets),
+ GFP_KERNEL);
+ if (!pcie->reg_offsets)
+ return -ENOMEM;
+
+ /* go through the register table and populate all valid registers */
+ pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
+ IPROC_PCIE_REG_INVALID : regs[0];
+ for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
+ pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
+ regs[reg_idx] : IPROC_PCIE_REG_INVALID;
+
+ return 0;
+}
+
+int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
+{
+ struct device *dev;
+ int ret;
+ struct pci_dev *pdev;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+ dev = pcie->dev;
+
+ ret = iproc_pcie_rev_init(pcie);
+ if (ret) {
+ dev_err(dev, "unable to initialize controller parameters\n");
+ return ret;
+ }
+
+ ret = phy_init(pcie->phy);
+ if (ret) {
+ dev_err(dev, "unable to initialize PCIe PHY\n");
+ return ret;
+ }
+
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ dev_err(dev, "unable to power on PCIe PHY\n");
+ goto err_exit_phy;
+ }
+
+ iproc_pcie_perst_ctrl(pcie, true);
+ iproc_pcie_perst_ctrl(pcie, false);
+
+ iproc_pcie_invalidate_mapping(pcie);
+
+ if (pcie->need_ob_cfg) {
+ ret = iproc_pcie_map_ranges(pcie, res);
+ if (ret) {
+ dev_err(dev, "map failed\n");
+ goto err_power_off_phy;
+ }
+ }
+
+ if (pcie->need_ib_cfg) {
+ ret = iproc_pcie_map_dma_ranges(pcie);
+ if (ret && ret != -ENOENT)
+ goto err_power_off_phy;
+ }
+
+ ret = iproc_pcie_check_link(pcie);
+ if (ret) {
+ dev_err(dev, "no PCIe EP device detected\n");
+ goto err_power_off_phy;
+ }
+
+ iproc_pcie_enable(pcie);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ if (iproc_pcie_msi_enable(pcie))
+ dev_info(dev, "not using iProc MSI\n");
+
+ host->ops = &iproc_pcie_ops;
+ host->sysdata = pcie;
+ host->map_irq = pcie->map_irq;
+
+ ret = pci_host_probe(host);
+ if (ret < 0) {
+ dev_err(dev, "failed to scan host: %d\n", ret);
+ goto err_power_off_phy;
+ }
+
+ for_each_pci_bridge(pdev, host->bus) {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+ pcie_print_link_status(pdev);
+ }
+
+ return 0;
+
+err_power_off_phy:
+ phy_power_off(pcie->phy);
+err_exit_phy:
+ phy_exit(pcie->phy);
+ return ret;
+}
+EXPORT_SYMBOL(iproc_pcie_setup);
+
+void iproc_pcie_remove(struct iproc_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+ pci_stop_root_bus(host->bus);
+ pci_remove_root_bus(host->bus);
+
+ iproc_pcie_msi_disable(pcie);
+
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+}
+EXPORT_SYMBOL(iproc_pcie_remove);
+
+/*
+ * The MSI parsing logic in certain revisions of Broadcom PAXC-based root
+ * complexes does not work and needs to be disabled
+ */
+static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev)
+{
+ struct iproc_pcie *pcie = iproc_data(pdev->bus);
+
+ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ iproc_pcie_paxc_v2_msi_steer(pcie, 0, false);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0,
+ quirk_paxc_disable_msi_parsing);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
+ quirk_paxc_disable_msi_parsing);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
+ quirk_paxc_disable_msi_parsing);
+
+static void quirk_paxc_bridge(struct pci_dev *pdev)
+{
+ /*
+ * The PCI config space is shared between the PAXC root port and the first
+ * Ethernet device. We need to work around this by telling the PCI code
+ * that the bridge is not an Ethernet device.
+ */
+ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ pdev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
+
+ /*
+ * MPSS is not being set properly (as it is currently 0). This is
+ * because that area of the PCI config space is hard coded to zero, and
+ * is not modifiable by firmware. Set this to 2 (i.e., a 512-byte MPS)
+ * so that the MPS can be set to the real max value.
+ */
+ pdev->pcie_mpss = 2;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-iproc.h b/drivers/pci/controller/pcie-iproc.h
new file mode 100644
index 000000000..969ded03b
--- /dev/null
+++ b/drivers/pci/controller/pcie-iproc.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014-2015 Broadcom Corporation
+ */
+
+#ifndef _PCIE_IPROC_H
+#define _PCIE_IPROC_H
+
+/**
+ * enum iproc_pcie_type - iProc PCIe interface type
+ * @IPROC_PCIE_PAXB_BCMA: BCMA-based host controllers
+ * @IPROC_PCIE_PAXB: PAXB-based host controllers for
+ * NS, NSP, Cygnus, NS2, and Pegasus SOCs
+ * @IPROC_PCIE_PAXB_V2: PAXB-based host controllers for Stingray SoCs
+ * @IPROC_PCIE_PAXC: PAXC-based host controllers
+ * @IPROC_PCIE_PAXC_V2: PAXC-based host controllers (second generation)
+ *
+ * PAXB is the wrapper used in root complexes that can be connected to
+ * external endpoint devices.
+ *
+ * PAXC is the wrapper used in root complexes dedicated to internal emulated
+ * endpoint devices.
+ */
+enum iproc_pcie_type {
+ IPROC_PCIE_PAXB_BCMA = 0,
+ IPROC_PCIE_PAXB,
+ IPROC_PCIE_PAXB_V2,
+ IPROC_PCIE_PAXC,
+ IPROC_PCIE_PAXC_V2,
+};
+
+/**
+ * struct iproc_pcie_ob - iProc PCIe outbound mapping
+ * @axi_offset: offset from the AXI address to the internal address used by
+ * the iProc PCIe core
+ * @nr_windows: total number of supported outbound mapping windows
+ */
+struct iproc_pcie_ob {
+ resource_size_t axi_offset;
+ unsigned int nr_windows;
+};
+
+/**
+ * struct iproc_pcie_ib - iProc PCIe inbound mapping
+ * @nr_regions: total number of supported inbound mapping regions
+ */
+struct iproc_pcie_ib {
+ unsigned int nr_regions;
+};
+
+struct iproc_pcie_ob_map;
+struct iproc_pcie_ib_map;
+struct iproc_msi;
+
+/**
+ * struct iproc_pcie - iProc PCIe device
+ * @dev: pointer to device data structure
+ * @type: iProc PCIe interface type
+ * @reg_offsets: register offsets
+ * @base: PCIe host controller I/O register base
+ * @base_addr: PCIe host controller register base physical address
+ * @mem: host bridge memory window resource
+ * @phy: optional PHY device that controls the Serdes
+ * @map_irq: function callback to map interrupts
+ * @ep_is_internal: indicates an internal emulated endpoint device is connected
+ * @iproc_cfg_read: indicates the iProc config read function should be used
+ * @rej_unconfig_pf: indicates the root complex needs to detect and reject
+ * enumeration against unconfigured physical functions emulated in the ASIC
+ * @has_apb_err_disable: indicates the controller can be configured to prevent
+ * unsupported requests from being forwarded as an APB bus error
+ * @fix_paxc_cap: indicates the controller has a corrupted capability list in
+ * its config space registers and requires a SW-based fixup
+ *
+ * @need_ob_cfg: indicates SW needs to configure the outbound mapping window
+ * @ob: outbound mapping related parameters
+ * @ob_map: outbound mapping related parameters specific to the controller
+ *
+ * @need_ib_cfg: indicates SW needs to configure the inbound mapping window
+ * @ib: inbound mapping related parameters
+ * @ib_map: inbound mapping region related parameters specific to the controller
+ *
+ * @need_msi_steer: indicates additional configuration of the iProc PCIe
+ * controller is required to steer MSI writes to external interrupt controller
+ * @msi: MSI data
+ */
+struct iproc_pcie {
+ struct device *dev;
+ enum iproc_pcie_type type;
+ u16 *reg_offsets;
+ void __iomem *base;
+ phys_addr_t base_addr;
+ struct resource mem;
+ struct phy *phy;
+ int (*map_irq)(const struct pci_dev *, u8, u8);
+ bool ep_is_internal;
+ bool iproc_cfg_read;
+ bool rej_unconfig_pf;
+ bool has_apb_err_disable;
+ bool fix_paxc_cap;
+
+ bool need_ob_cfg;
+ struct iproc_pcie_ob ob;
+ const struct iproc_pcie_ob_map *ob_map;
+
+ bool need_ib_cfg;
+ struct iproc_pcie_ib ib;
+ const struct iproc_pcie_ib_map *ib_map;
+
+ bool need_msi_steer;
+ struct iproc_msi *msi;
+};
+
+int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
+void iproc_pcie_remove(struct iproc_pcie *pcie);
+int iproc_pcie_shutdown(struct iproc_pcie *pcie);
+
+#ifdef CONFIG_PCIE_IPROC_MSI
+int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node);
+void iproc_msi_exit(struct iproc_pcie *pcie);
+#else
+static inline int iproc_msi_init(struct iproc_pcie *pcie,
+ struct device_node *node)
+{
+ return -ENODEV;
+}
+static inline void iproc_msi_exit(struct iproc_pcie *pcie)
+{
+}
+#endif
+
+#endif /* _PCIE_IPROC_H */
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
new file mode 100644
index 000000000..975b3024f
--- /dev/null
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -0,0 +1,1094 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek PCIe host controller driver.
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Jianjun Wang <jianjun.wang@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "../pci.h"
+
+#define PCIE_SETTING_REG 0x80
+#define PCIE_PCI_IDS_1 0x9c
+#define PCI_CLASS(class) ((class) << 8)
+#define PCIE_RC_MODE BIT(0)
+
+#define PCIE_CFGNUM_REG 0x140
+#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
+#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
+#define PCIE_CFG_BYTE_EN(bytes) (((bytes) << 16) & GENMASK(19, 16))
+#define PCIE_CFG_FORCE_BYTE_EN BIT(20)
+#define PCIE_CFG_OFFSET_ADDR 0x1000
+#define PCIE_CFG_HEADER(bus, devfn) \
+ (PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))
+
+#define PCIE_RST_CTRL_REG 0x148
+#define PCIE_MAC_RSTB BIT(0)
+#define PCIE_PHY_RSTB BIT(1)
+#define PCIE_BRG_RSTB BIT(2)
+#define PCIE_PE_RSTB BIT(3)
+
+#define PCIE_LTSSM_STATUS_REG 0x150
+#define PCIE_LTSSM_STATE_MASK GENMASK(28, 24)
+#define PCIE_LTSSM_STATE(val) ((val & PCIE_LTSSM_STATE_MASK) >> 24)
+#define PCIE_LTSSM_STATE_L2_IDLE 0x14
+
+#define PCIE_LINK_STATUS_REG 0x154
+#define PCIE_PORT_LINKUP BIT(8)
+
+#define PCIE_MSI_SET_NUM 8
+#define PCIE_MSI_IRQS_PER_SET 32
+#define PCIE_MSI_IRQS_NUM \
+ (PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)
+
+#define PCIE_INT_ENABLE_REG 0x180
+#define PCIE_MSI_ENABLE GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
+#define PCIE_MSI_SHIFT 8
+#define PCIE_INTX_SHIFT 24
+#define PCIE_INTX_ENABLE \
+ GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)
+
+#define PCIE_INT_STATUS_REG 0x184
+#define PCIE_MSI_SET_ENABLE_REG 0x190
+#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
+
+#define PCIE_MSI_SET_BASE_REG 0xc00
+#define PCIE_MSI_SET_OFFSET 0x10
+#define PCIE_MSI_SET_STATUS_OFFSET 0x04
+#define PCIE_MSI_SET_ENABLE_OFFSET 0x08
+
+#define PCIE_MSI_SET_ADDR_HI_BASE 0xc80
+#define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04
+
+#define PCIE_ICMD_PM_REG 0x198
+#define PCIE_TURN_OFF_LINK BIT(4)
+
+#define PCIE_MISC_CTRL_REG 0x348
+#define PCIE_DISABLE_DVFSRC_VLT_REQ BIT(1)
+
+#define PCIE_TRANS_TABLE_BASE_REG 0x800
+#define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4
+#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8
+#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET 0xc
+#define PCIE_ATR_TRSL_PARAM_OFFSET 0x10
+#define PCIE_ATR_TLB_SET_OFFSET 0x20
+
+#define PCIE_MAX_TRANS_TABLES 8
+#define PCIE_ATR_EN BIT(0)
+#define PCIE_ATR_SIZE(size) \
+ (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
+#define PCIE_ATR_ID(id) ((id) & GENMASK(3, 0))
+#define PCIE_ATR_TYPE_MEM PCIE_ATR_ID(0)
+#define PCIE_ATR_TYPE_IO PCIE_ATR_ID(1)
+#define PCIE_ATR_TLP_TYPE(type) (((type) << 16) & GENMASK(18, 16))
+#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
+#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+
+/**
+ * struct mtk_msi_set - MSI information for each set
+ * @base: IO mapped register base
+ * @msg_addr: MSI message address
+ * @saved_irq_state: IRQ enable state saved at suspend time
+ */
+struct mtk_msi_set {
+ void __iomem *base;
+ phys_addr_t msg_addr;
+ u32 saved_irq_state;
+};
+
+/**
+ * struct mtk_gen3_pcie - PCIe port information
+ * @dev: pointer to PCIe device
+ * @base: IO mapped register base
+ * @reg_base: physical register base
+ * @mac_reset: MAC reset control
+ * @phy_reset: PHY reset control
+ * @phy: PHY controller block
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count for this port
+ * @irq: PCIe controller interrupt number
+ * @saved_irq_state: IRQ enable state saved at suspend time
+ * @irq_lock: lock protecting IRQ register access
+ * @intx_domain: legacy INTx IRQ domain
+ * @msi_domain: MSI IRQ domain
+ * @msi_bottom_domain: MSI IRQ bottom domain
+ * @msi_sets: MSI sets information
+ * @lock: lock protecting IRQ bit map
+ * @msi_irq_in_use: bit map for assigned MSI IRQ
+ */
+struct mtk_gen3_pcie {
+ struct device *dev;
+ void __iomem *base;
+ phys_addr_t reg_base;
+ struct reset_control *mac_reset;
+ struct reset_control *phy_reset;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ int num_clks;
+
+ int irq;
+ u32 saved_irq_state;
+ raw_spinlock_t irq_lock;
+ struct irq_domain *intx_domain;
+ struct irq_domain *msi_domain;
+ struct irq_domain *msi_bottom_domain;
+ struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
+ struct mutex lock;
+ DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
+};
+
+/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
+static const char *const ltssm_str[] = {
+ "detect.quiet", /* 0x00 */
+ "detect.active", /* 0x01 */
+ "polling.active", /* 0x02 */
+ "polling.compliance", /* 0x03 */
+ "polling.configuration", /* 0x04 */
+ "config.linkwidthstart", /* 0x05 */
+ "config.linkwidthaccept", /* 0x06 */
+ "config.lanenumwait", /* 0x07 */
+ "config.lanenumaccept", /* 0x08 */
+ "config.complete", /* 0x09 */
+ "config.idle", /* 0x0A */
+ "recovery.receiverlock", /* 0x0B */
+ "recovery.equalization", /* 0x0C */
+ "recovery.speed", /* 0x0D */
+ "recovery.receiverconfig", /* 0x0E */
+ "recovery.idle", /* 0x0F */
+ "L0", /* 0x10 */
+ "L0s", /* 0x11 */
+ "L1.entry", /* 0x12 */
+ "L1.idle", /* 0x13 */
+ "L2.idle", /* 0x14 */
+ "L2.transmitwake", /* 0x15 */
+ "disable", /* 0x16 */
+ "loopback.entry", /* 0x17 */
+ "loopback.active", /* 0x18 */
+ "loopback.exit", /* 0x19 */
+ "hotreset", /* 0x1A */
+};
+
+/**
+ * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
+ * @bus: PCI bus to query
+ * @devfn: device/function number
+ * @where: offset in config space
+ * @size: data size in TLP header
+ *
+ * Set byte enable field and device information in configuration TLP header.
+ */
+static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
+ int where, int size)
+{
+ struct mtk_gen3_pcie *pcie = bus->sysdata;
+ int bytes;
+ u32 val;
+
+ bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
+
+ val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
+ PCIE_CFG_HEADER(bus->number, devfn);
+
+ writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
+}
+
+static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct mtk_gen3_pcie *pcie = bus->sysdata;
+
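+ /* fixed config window; the target bus/devfn is programmed via PCIE_CFGNUM_REG */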
+ return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
+}
+
+static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ mtk_pcie_config_tlp_header(bus, devfn, where, size);
+
+ return pci_generic_config_read32(bus, devfn, where, size, val);
+}
+
+static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ mtk_pcie_config_tlp_header(bus, devfn, where, size);
+
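+ /*
+ * The hardware performs all config writes as 32-bit accesses, with
+ * the byte enables in the TLP header selecting the target bytes, so
+ * shift sub-word data into the correct byte lanes.
+ */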
+ if (size <= 2)
+ val <<= (where & 0x3) * 8;
+
+ return pci_generic_config_write32(bus, devfn, where, 4, val);
+}
+
+static struct pci_ops mtk_pcie_ops = {
+ .map_bus = mtk_pcie_map_bus,
+ .read = mtk_pcie_config_read,
+ .write = mtk_pcie_config_write,
+};
+
+static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
+ resource_size_t cpu_addr,
+ resource_size_t pci_addr,
+ resource_size_t size,
+ unsigned long type, int *num)
+{
+ resource_size_t remaining = size;
+ resource_size_t table_size;
+ resource_size_t addr_align;
+ const char *range_type;
+ void __iomem *table;
+ u32 val;
+
+ while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
+ /* Table size needs to be a power of 2 */
+ table_size = BIT(fls(remaining) - 1);
+
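+ /* the window must also be naturally aligned to cpu_addr */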
+ if (cpu_addr > 0) {
+ addr_align = BIT(ffs(cpu_addr) - 1);
+ table_size = min(table_size, addr_align);
+ }
+
+ /* Minimum size of translate table is 4KiB */
+ if (table_size < 0x1000) {
+ dev_err(pcie->dev, "illegal table size %#llx\n",
+ (unsigned long long)table_size);
+ return -EINVAL;
+ }
+
+ table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
+ writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
+ writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
+ writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
+ writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
+
+ if (type == IORESOURCE_IO) {
+ val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
+ range_type = "IO";
+ } else {
+ val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+ range_type = "MEM";
+ }
+
+ writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+
+ dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+ range_type, *num, (unsigned long long)cpu_addr,
+ (unsigned long long)pci_addr, (unsigned long long)table_size);
+
+ cpu_addr += table_size;
+ pci_addr += table_size;
+ remaining -= table_size;
+ (*num)++;
+ }
+
+ if (remaining)
+ dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
+ (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
+
+ return 0;
+}
+
+static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
+{
+ int i;
+ u32 val;
+
+ for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
+
+ msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
+ i * PCIE_MSI_SET_OFFSET;
+ msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
+ i * PCIE_MSI_SET_OFFSET;
+
+ /*
+ * Configure the MSI capture address, i.e. the physical address of
+ * this MSI set's own registers.
+ */
+ writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
+ writel_relaxed(upper_32_bits(msi_set->msg_addr),
+ pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
+ i * PCIE_MSI_SET_ADDR_HI_OFFSET);
+ }
+
+ val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
+ val |= PCIE_MSI_SET_ENABLE;
+ writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);
+
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
+ val |= PCIE_MSI_ENABLE;
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
+}
+
+static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
+{
+ struct resource_entry *entry;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ unsigned int table_index = 0;
+ int err;
+ u32 val;
+
+ /* Set as RC mode */
+ val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
+ val |= PCIE_RC_MODE;
+ writel_relaxed(val, pcie->base + PCIE_SETTING_REG);
+
+ /* Set class code */
+ val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
+ val &= ~GENMASK(31, 8);
+ val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
+ writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);
+
+ /* Mask all INTx interrupts */
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
+ val &= ~PCIE_INTX_ENABLE;
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
+
+ /* Disable DVFSRC voltage request */
+ val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
+ val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
+ writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
+
+ /* Assert all reset signals */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+ /*
+ * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
+ * and 2.2.1 (Initial Power-Up (G3 to S0)).
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(100);
+
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+ /* Check if the link is up or not */
+ err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
+ !!(val & PCIE_PORT_LINKUP), 20,
+ PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
+ if (err) {
+ const char *ltssm_state;
+ int ltssm_index;
+
+ val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
+ ltssm_index = PCIE_LTSSM_STATE(val);
+ ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
+ "Unknown state" : ltssm_str[ltssm_index];
+ dev_err(pcie->dev,
+ "PCIe link down, current LTSSM state: %s (%#x)\n",
+ ltssm_state, val);
+ return err;
+ }
+
+ mtk_pcie_enable_msi(pcie);
+
+ /* Set PCIe translation windows */
+ resource_list_for_each_entry(entry, &host->windows) {
+ struct resource *res = entry->res;
+ unsigned long type = resource_type(res);
+ resource_size_t cpu_addr;
+ resource_size_t pci_addr;
+ resource_size_t size;
+
+ if (type == IORESOURCE_IO)
+ cpu_addr = pci_pio_to_address(res->start);
+ else if (type == IORESOURCE_MEM)
+ cpu_addr = res->start;
+ else
+ continue;
+
+ pci_addr = res->start - entry->offset;
+ size = resource_size(res);
+ err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
+ type, &table_index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mtk_pcie_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void mtk_pcie_msi_irq_mask(struct irq_data *data)
+{
+ pci_msi_mask_irq(data);
+ irq_chip_mask_parent(data);
+}
+
+static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
+{
+ pci_msi_unmask_irq(data);
+ irq_chip_unmask_parent(data);
+}
+
+static struct irq_chip mtk_msi_irq_chip = {
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = mtk_pcie_msi_irq_mask,
+ .irq_unmask = mtk_pcie_msi_irq_unmask,
+ .name = "MSI",
+};
+
+static struct msi_domain_info mtk_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &mtk_msi_irq_chip,
+};
+
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ struct mtk_gen3_pcie *pcie = data->domain->host_data;
+ unsigned long hwirq;
+
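+ /* the MSI payload is the vector index within the owning MSI set */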
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ msg->address_hi = upper_32_bits(msi_set->msg_addr);
+ msg->address_lo = lower_32_bits(msi_set->msg_addr);
+ msg->data = hwirq;
+ dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
+ hwirq, msg->address_hi, msg->address_lo, msg->data);
+}
+
+static void mtk_msi_bottom_irq_ack(struct irq_data *data)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ unsigned long hwirq;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
+}
+
+static void mtk_msi_bottom_irq_mask(struct irq_data *data)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ struct mtk_gen3_pcie *pcie = data->domain->host_data;
+ unsigned long hwirq, flags;
+ u32 val;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+ val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ val &= ~BIT(hwirq);
+ writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
+}
+
+static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ struct mtk_gen3_pcie *pcie = data->domain->host_data;
+ unsigned long hwirq, flags;
+ u32 val;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+ val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ val |= BIT(hwirq);
+ writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
+}
+
+static struct irq_chip mtk_msi_bottom_irq_chip = {
+ .irq_ack = mtk_msi_bottom_irq_ack,
+ .irq_mask = mtk_msi_bottom_irq_mask,
+ .irq_unmask = mtk_msi_bottom_irq_unmask,
+ .irq_compose_msi_msg = mtk_compose_msi_msg,
+ .irq_set_affinity = mtk_pcie_set_affinity,
+ .name = "MSI",
+};
+
+static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ struct mtk_gen3_pcie *pcie = domain->host_data;
+ struct mtk_msi_set *msi_set;
+ int i, hwirq, set_idx;
+
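+ /* reserve a naturally aligned power-of-2 block of vectors (multi-MSI) */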
+ mutex_lock(&pcie->lock);
+
+ hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
+ order_base_2(nr_irqs));
+
+ mutex_unlock(&pcie->lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
+ msi_set = &pcie->msi_sets[set_idx];
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &mtk_msi_bottom_irq_chip, msi_set,
+ handle_edge_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct mtk_gen3_pcie *pcie = domain->host_data;
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+
+ mutex_lock(&pcie->lock);
+
+ bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
+ order_base_2(nr_irqs));
+
+ mutex_unlock(&pcie->lock);
+
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
+ .alloc = mtk_msi_bottom_domain_alloc,
+ .free = mtk_msi_bottom_domain_free,
+};
+
+static void mtk_intx_mask(struct irq_data *data)
+{
+ struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
+ val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
+}
+
+static void mtk_intx_unmask(struct irq_data *data)
+{
+ struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
+ val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
+}
+
+/**
+ * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
+ * @data: pointer to chip specific data
+ *
+ * As an emulated level IRQ, its interrupt status will remain asserted
+ * until the corresponding de-assert message is received; hence the
+ * status can only be cleared once the interrupt has been serviced.
+ */
+static void mtk_intx_eoi(struct irq_data *data)
+{
+ struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
+ unsigned long hwirq;
+
+ hwirq = data->hwirq + PCIE_INTX_SHIFT;
+ writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
+}
+
+static struct irq_chip mtk_intx_irq_chip = {
+ .irq_mask = mtk_intx_mask,
+ .irq_unmask = mtk_intx_unmask,
+ .irq_eoi = mtk_intx_eoi,
+ .irq_set_affinity = mtk_pcie_set_affinity,
+ .name = "INTx",
+};
+
+static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
+ handle_fasteoi_irq, "INTx");
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mtk_pcie_intx_map,
+};
+
+static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *intc_node, *node = dev->of_node;
+ int ret;
+
+ raw_spin_lock_init(&pcie->irq_lock);
+
+ /* Setup INTx */
+ intc_node = of_get_child_by_name(node, "interrupt-controller");
+ if (!intc_node) {
+ dev_err(dev, "missing interrupt-controller node\n");
+ return -ENODEV;
+ }
+
+ pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
+ if (!pcie->intx_domain) {
+ dev_err(dev, "failed to create INTx IRQ domain\n");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ /* Setup MSI */
+ mutex_init(&pcie->lock);
+
+ pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
+ &mtk_msi_bottom_domain_ops, pcie);
+ if (!pcie->msi_bottom_domain) {
+ dev_err(dev, "failed to create MSI bottom domain\n");
+ ret = -ENODEV;
+ goto err_msi_bottom_domain;
+ }
+
+ pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
+ &mtk_msi_domain_info,
+ pcie->msi_bottom_domain);
+ if (!pcie->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ ret = -ENODEV;
+ goto err_msi_domain;
+ }
+
+ of_node_put(intc_node);
+ return 0;
+
+err_msi_domain:
+ irq_domain_remove(pcie->msi_bottom_domain);
+err_msi_bottom_domain:
+ irq_domain_remove(pcie->intx_domain);
+out_put_node:
+ of_node_put(intc_node);
+ return ret;
+}
+
+static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
+{
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+
+ if (pcie->intx_domain)
+ irq_domain_remove(pcie->intx_domain);
+
+ if (pcie->msi_domain)
+ irq_domain_remove(pcie->msi_domain);
+
+ if (pcie->msi_bottom_domain)
+ irq_domain_remove(pcie->msi_bottom_domain);
+
+ irq_dispose_mapping(pcie->irq);
+}
+
+static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
+{
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
+ unsigned long msi_enable, msi_status;
+ irq_hw_number_t bit, hwirq;
+
+ msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+
+ do {
+ msi_status = readl_relaxed(msi_set->base +
+ PCIE_MSI_SET_STATUS_OFFSET);
+ msi_status &= msi_enable;
+ if (!msi_status)
+ break;
+
+ for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
+ hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
+ generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
+ }
+ } while (true);
+}
+
+static void mtk_pcie_irq_handler(struct irq_desc *desc)
+{
+ struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long status;
+ irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
+
+ chained_irq_enter(irqchip, desc);
+
+ status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
+ for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
+ PCIE_INTX_SHIFT)
+ generic_handle_domain_irq(pcie->intx_domain,
+ irq_bit - PCIE_INTX_SHIFT);
+
+ irq_bit = PCIE_MSI_SHIFT;
+ for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
+ PCIE_MSI_SHIFT) {
+ mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);
+
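+		/* Clear the port-level MSI status only after the set has been serviced */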
+ writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int err;
+
+ err = mtk_pcie_init_irq_domains(pcie);
+ if (err)
+ return err;
+
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq < 0)
+ return pcie->irq;
+
+ irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);
+
+ return 0;
+}
+
+static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *regs;
+ int ret;
+
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
+ if (!regs)
+ return -EINVAL;
+ pcie->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(pcie->base)) {
+ dev_err(dev, "failed to map register base\n");
+ return PTR_ERR(pcie->base);
+ }
+
+ pcie->reg_base = regs->start;
+
+ pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
+ if (IS_ERR(pcie->phy_reset)) {
+ ret = PTR_ERR(pcie->phy_reset);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get PHY reset\n");
+
+ return ret;
+ }
+
+ pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
+ if (IS_ERR(pcie->mac_reset)) {
+ ret = PTR_ERR(pcie->mac_reset);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get MAC reset\n");
+
+ return ret;
+ }
+
+ pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get PHY\n");
+
+ return ret;
+ }
+
+ pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
+ if (pcie->num_clks < 0) {
+ dev_err(dev, "failed to get clocks\n");
+ return pcie->num_clks;
+ }
+
+ return 0;
+}
+
+static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ int err;
+
+ /* PHY power on and enable pipe clock */
+ reset_control_deassert(pcie->phy_reset);
+
+ err = phy_init(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize PHY\n");
+ goto err_phy_init;
+ }
+
+ err = phy_power_on(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to power on PHY\n");
+ goto err_phy_on;
+ }
+
+ /* MAC power on and enable transaction layer clocks */
+ reset_control_deassert(pcie->mac_reset);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (err) {
+ dev_err(dev, "failed to enable clocks\n");
+ goto err_clk_init;
+ }
+
+ return 0;
+
+err_clk_init:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ reset_control_assert(pcie->mac_reset);
+ phy_power_off(pcie->phy);
+err_phy_on:
+ phy_exit(pcie->phy);
+err_phy_init:
+ reset_control_assert(pcie->phy_reset);
+
+ return err;
+}
+
+static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
+{
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+
+ pm_runtime_put_sync(pcie->dev);
+ pm_runtime_disable(pcie->dev);
+ reset_control_assert(pcie->mac_reset);
+
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+ reset_control_assert(pcie->phy_reset);
+}
+
+static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
+{
+ int err;
+
+ err = mtk_pcie_parse_port(pcie);
+ if (err)
+ return err;
+
+ /*
+	 * The controller may have been left out of reset by the bootloader,
+	 * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_assert(pcie->phy_reset);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(10, 20);
+
+ /* Don't touch the hardware registers before power up */
+ err = mtk_pcie_power_up(pcie);
+ if (err)
+ return err;
+
+ /* Try link up */
+ err = mtk_pcie_startup_port(pcie);
+ if (err)
+ goto err_setup;
+
+ err = mtk_pcie_setup_irq(pcie);
+ if (err)
+ goto err_setup;
+
+ return 0;
+
+err_setup:
+ mtk_pcie_power_down(pcie);
+
+ return err;
+}
+
+static int mtk_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_gen3_pcie *pcie;
+ struct pci_host_bridge *host;
+ int err;
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!host)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(host);
+
+ pcie->dev = dev;
+ platform_set_drvdata(pdev, pcie);
+
+ err = mtk_pcie_setup(pcie);
+ if (err)
+ return err;
+
+ host->ops = &mtk_pcie_ops;
+ host->sysdata = pcie;
+
+ err = pci_host_probe(host);
+ if (err) {
+ mtk_pcie_irq_teardown(pcie);
+ mtk_pcie_power_down(pcie);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mtk_pcie_remove(struct platform_device *pdev)
+{
+ struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(host->bus);
+ pci_remove_root_bus(host->bus);
+ pci_unlock_rescan_remove();
+
+ mtk_pcie_irq_teardown(pcie);
+ mtk_pcie_power_down(pcie);
+}
+
+static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
+{
+ int i;
+
+ raw_spin_lock(&pcie->irq_lock);
+
+ pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
+
+ for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
+
+ msi_set->saved_irq_state = readl_relaxed(msi_set->base +
+ PCIE_MSI_SET_ENABLE_OFFSET);
+ }
+
+ raw_spin_unlock(&pcie->irq_lock);
+}
+
+static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
+{
+ int i;
+
+ raw_spin_lock(&pcie->irq_lock);
+
+ writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);
+
+ for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
+
+ writel_relaxed(msi_set->saved_irq_state,
+ msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ }
+
+ raw_spin_unlock(&pcie->irq_lock);
+}
+
+static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
+{
+ u32 val;
+
+ val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
+ val |= PCIE_TURN_OFF_LINK;
+ writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);
+
+	/* Wait for the link to enter the L2 idle state */
+ return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
+ (PCIE_LTSSM_STATE(val) ==
+ PCIE_LTSSM_STATE_L2_IDLE), 20,
+ 50 * USEC_PER_MSEC);
+}
+
+static int mtk_pcie_suspend_noirq(struct device *dev)
+{
+ struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ /* Trigger link to L2 state */
+ err = mtk_pcie_turn_off_link(pcie);
+ if (err) {
+ dev_err(pcie->dev, "cannot enter L2 state\n");
+ return err;
+ }
+
+ /* Pull down the PERST# pin */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+	dev_dbg(pcie->dev, "entered L2 state successfully\n");
+
+ mtk_pcie_irq_save(pcie);
+ mtk_pcie_power_down(pcie);
+
+ return 0;
+}
+
+static int mtk_pcie_resume_noirq(struct device *dev)
+{
+ struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
+ int err;
+
+ err = mtk_pcie_power_up(pcie);
+ if (err)
+ return err;
+
+ err = mtk_pcie_startup_port(pcie);
+ if (err) {
+ mtk_pcie_power_down(pcie);
+ return err;
+ }
+
+ mtk_pcie_irq_restore(pcie);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
+};
+
+static const struct of_device_id mtk_pcie_of_match[] = {
+ { .compatible = "mediatek,mt8192-pcie" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
+
+static struct platform_driver mtk_pcie_driver = {
+ .probe = mtk_pcie_probe,
+ .remove_new = mtk_pcie_remove,
+ .driver = {
+ .name = "mtk-pcie-gen3",
+ .of_match_table = mtk_pcie_of_match,
+ .pm = &mtk_pcie_pm_ops,
+ },
+};
+
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
new file mode 100644
index 000000000..48372013f
--- /dev/null
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -0,0 +1,1255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek PCIe host controller driver.
+ *
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Honghui Zhang <honghui.zhang@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "../pci.h"
+
+/* PCIe shared registers */
+#define PCIE_SYS_CFG 0x00
+#define PCIE_INT_ENABLE 0x0c
+#define PCIE_CFG_ADDR 0x20
+#define PCIE_CFG_DATA 0x24
+
+/* PCIe per port registers */
+#define PCIE_BAR0_SETUP 0x10
+#define PCIE_CLASS 0x34
+#define PCIE_LINK_STATUS 0x50
+
+#define PCIE_PORT_INT_EN(x) BIT(20 + (x))
+#define PCIE_PORT_PERST(x) BIT(1 + (x))
+#define PCIE_PORT_LINKUP BIT(0)
+#define PCIE_BAR_MAP_MAX GENMASK(31, 16)
+
+#define PCIE_BAR_ENABLE BIT(0)
+#define PCIE_REVISION_ID BIT(0)
+#define PCIE_CLASS_CODE (0x60400 << 8)
+#define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \
+ ((((regn) >> 8) & GENMASK(3, 0)) << 24))
+#define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8))
+#define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11))
+#define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16))
+#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
+ (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
+ PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
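+/*
+ * For illustration: a type 0 access to register 0x10 of bus 1, device 2,
+ * function 0 encodes as PCIE_CONF_ADDR(0x10, 0, 2, 1) == 0x00011010
+ * (reg[7:2] = 0x10, dev << 11 = 0x1000, bus << 16 = 0x10000).
+ */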
+
+/* MediaTek specific configuration registers */
+#define PCIE_FTS_NUM 0x70c
+#define PCIE_FTS_NUM_MASK GENMASK(15, 8)
+#define PCIE_FTS_NUM_L0(x)		(((x) & 0xff) << 8)
+
+#define PCIE_FC_CREDIT 0x73c
+#define PCIE_FC_CREDIT_MASK		(BIT(31) | GENMASK(28, 16))
+#define PCIE_FC_CREDIT_VAL(x) ((x) << 16)
+
+/* PCIe V2 share registers */
+#define PCIE_SYS_CFG_V2 0x0
+#define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8)
+#define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8)
+
+/* PCIe V2 per-port registers */
+#define PCIE_MSI_VECTOR 0x0c0
+
+#define PCIE_CONF_VEND_ID 0x100
+#define PCIE_CONF_DEVICE_ID 0x102
+#define PCIE_CONF_CLASS_ID 0x106
+
+#define PCIE_INT_MASK 0x420
+#define INTX_MASK GENMASK(19, 16)
+#define INTX_SHIFT 16
+#define PCIE_INT_STATUS 0x424
+#define MSI_STATUS BIT(23)
+#define PCIE_IMSI_STATUS 0x42c
+#define PCIE_IMSI_ADDR 0x430
+#define MSI_MASK BIT(23)
+#define MTK_MSI_IRQS_NUM 32
+
+#define PCIE_AHB_TRANS_BASE0_L 0x438
+#define PCIE_AHB_TRANS_BASE0_H 0x43c
+#define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0))
+#define PCIE_AXI_WINDOW0 0x448
+#define WIN_ENABLE BIT(7)
+/*
+ * Define the PCIe-to-AHB window size as 2^33 (the field encodes log2 of
+ * the window size, and 0x21 == 33) to support translating up to 8GB of
+ * address space, so that EP DMA can access at least 4GB of DRAM
+ * (physical DRAM starts at 0x40000000).
+ */
+#define PCIE2AHB_SIZE	0x21
+
+/* PCIe V2 configuration transaction header */
+#define PCIE_CFG_HEADER0 0x460
+#define PCIE_CFG_HEADER1 0x464
+#define PCIE_CFG_HEADER2 0x468
+#define PCIE_CFG_WDATA 0x470
+#define PCIE_APP_TLP_REQ 0x488
+#define PCIE_CFG_RDATA 0x48c
+#define APP_CFG_REQ BIT(0)
+#define APP_CPL_STATUS GENMASK(7, 5)
+
+#define CFG_WRRD_TYPE_0 4
+#define CFG_WR_FMT 2
+#define CFG_RD_FMT 0
+
+#define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0))
+#define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24))
+#define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29))
+#define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2))
+#define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16))
+#define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19))
+#define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24))
+#define CFG_HEADER_DW0(type, fmt) \
+ (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
+#define CFG_HEADER_DW1(where, size) \
+ (GENMASK(((size) - 1), 0) << ((where) & 0x3))
+#define CFG_HEADER_DW2(regn, fun, dev, bus) \
+ (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
+ CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))
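+/*
+ * For illustration, a 4-byte Cfgrd of register 0x10 on bus 1, device 2,
+ * function 0 uses CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT) == 0x04000001,
+ * CFG_HEADER_DW1(0x10, 4) == 0xf (all byte enables) and
+ * CFG_HEADER_DW2(0x10, 0, 2, 1) == 0x01100010.
+ */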
+
+#define PCIE_RST_CTRL 0x510
+#define PCIE_PHY_RSTB BIT(0)
+#define PCIE_PIPE_SRSTB BIT(1)
+#define PCIE_MAC_SRSTB BIT(2)
+#define PCIE_CRSTB BIT(3)
+#define PCIE_PERSTB BIT(8)
+#define PCIE_LINKDOWN_RST_EN GENMASK(15, 13)
+#define PCIE_LINK_STATUS_V2 0x804
+#define PCIE_PORT_LINKUP_V2 BIT(10)
+
+struct mtk_pcie_port;
+
+/**
+ * struct mtk_pcie_soc - differentiate between host generations
+ * @need_fix_class_id: whether this host's class ID needs to be fixed or not
+ * @need_fix_device_id: whether this host's device ID needs to be fixed or not
+ * @no_msi: Bridge has no MSI support, and relies on an external block
+ * @device_id: device ID to program when the host's device ID needs fixing
+ * @ops: pointer to configuration access functions
+ * @startup: pointer to controller setting functions
+ * @setup_irq: pointer to initialize IRQ functions
+ */
+struct mtk_pcie_soc {
+ bool need_fix_class_id;
+ bool need_fix_device_id;
+ bool no_msi;
+ unsigned int device_id;
+ struct pci_ops *ops;
+ int (*startup)(struct mtk_pcie_port *port);
+ int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
+};
+
+/**
+ * struct mtk_pcie_port - PCIe port information
+ * @base: IO mapped register base
+ * @list: port list
+ * @pcie: pointer to PCIe host info
+ * @reset: pointer to port reset control
+ * @sys_ck: pointer to transaction/data link layer clock
+ * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
+ * and RC initiated MMIO access
+ * @axi_ck: pointer to application layer MMIO channel operating clock
+ * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
+ * when pcie_mac_ck/pcie_pipe_ck is turned off
+ * @obff_ck: pointer to OBFF functional block operating clock
+ * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
+ * @phy: pointer to PHY control block
+ * @slot: port slot
+ * @irq: GIC irq
+ * @irq_domain: legacy INTx IRQ domain
+ * @inner_domain: inner IRQ domain
+ * @msi_domain: MSI IRQ domain
+ * @lock: protect the msi_irq_in_use bitmap
+ * @msi_irq_in_use: bit map for assigned MSI IRQ
+ */
+struct mtk_pcie_port {
+ void __iomem *base;
+ struct list_head list;
+ struct mtk_pcie *pcie;
+ struct reset_control *reset;
+ struct clk *sys_ck;
+ struct clk *ahb_ck;
+ struct clk *axi_ck;
+ struct clk *aux_ck;
+ struct clk *obff_ck;
+ struct clk *pipe_ck;
+ struct phy *phy;
+ u32 slot;
+ int irq;
+ struct irq_domain *irq_domain;
+ struct irq_domain *inner_domain;
+ struct irq_domain *msi_domain;
+ struct mutex lock;
+ DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
+};
+
+/**
+ * struct mtk_pcie - PCIe host information
+ * @dev: pointer to PCIe device
+ * @base: IO mapped register base
+ * @cfg: IO mapped register map for PCIe config
+ * @free_ck: free-run reference clock
+ * @mem: non-prefetchable memory resource
+ * @ports: pointer to PCIe port information
+ * @soc: pointer to SoC-dependent operations
+ */
+struct mtk_pcie {
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *cfg;
+ struct clk *free_ck;
+
+ struct list_head ports;
+ const struct mtk_pcie_soc *soc;
+};
+
+static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+
+ clk_disable_unprepare(pcie->free_ck);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+}
+
+static void mtk_pcie_port_free(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
+
+ devm_iounmap(dev, port->base);
+ list_del(&port->list);
+ devm_kfree(dev, port);
+}
+
+static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
+{
+ struct mtk_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ clk_disable_unprepare(port->pipe_ck);
+ clk_disable_unprepare(port->obff_ck);
+ clk_disable_unprepare(port->axi_ck);
+ clk_disable_unprepare(port->aux_ck);
+ clk_disable_unprepare(port->ahb_ck);
+ clk_disable_unprepare(port->sys_ck);
+ mtk_pcie_port_free(port);
+ }
+
+ mtk_pcie_subsys_powerdown(pcie);
+}
+
+static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
+{
+ u32 val;
+ int err;
+
+ err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
+ !(val & APP_CFG_REQ), 10,
+ 100 * USEC_PER_MSEC);
+ if (err)
+ return PCIBIOS_SET_FAILED;
+
+ if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
+ return PCIBIOS_SET_FAILED;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ u32 tmp;
+
+ /* Write PCIe configuration transaction header for Cfgrd */
+ writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
+ port->base + PCIE_CFG_HEADER0);
+ writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
+ writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
+ port->base + PCIE_CFG_HEADER2);
+
+ /* Trigger h/w to transmit Cfgrd TLP */
+ tmp = readl(port->base + PCIE_APP_TLP_REQ);
+ tmp |= APP_CFG_REQ;
+ writel(tmp, port->base + PCIE_APP_TLP_REQ);
+
+ /* Check completion status */
+ if (mtk_pcie_check_cfg_cpld(port))
+ return PCIBIOS_SET_FAILED;
+
+ /* Read cpld payload of Cfgrd */
+ *val = readl(port->base + PCIE_CFG_RDATA);
+
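+	/* Extract the addressed byte/halfword from the 32-bit completion data */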
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ /* Write PCIe configuration transaction header for Cfgwr */
+ writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
+ port->base + PCIE_CFG_HEADER0);
+ writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
+ writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
+ port->base + PCIE_CFG_HEADER2);
+
+ /* Write Cfgwr data */
+ val = val << 8 * (where & 3);
+ writel(val, port->base + PCIE_CFG_WDATA);
+
+ /* Trigger h/w to transmit Cfgwr TLP */
+ val = readl(port->base + PCIE_APP_TLP_REQ);
+ val |= APP_CFG_REQ;
+ writel(val, port->base + PCIE_APP_TLP_REQ);
+
+ /* Check completion status */
+ return mtk_pcie_check_cfg_cpld(port);
+}
+
+static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
+ unsigned int devfn)
+{
+ struct mtk_pcie *pcie = bus->sysdata;
+ struct mtk_pcie_port *port;
+ struct pci_dev *dev = NULL;
+
+ /*
+ * Walk the bus hierarchy to get the devfn value
+ * of the port in the root bus.
+ */
+ while (bus && bus->number) {
+ dev = bus->self;
+ bus = dev->bus;
+ devfn = dev->devfn;
+ }
+
+ list_for_each_entry(port, &pcie->ports, list)
+ if (port->slot == PCI_SLOT(devfn))
+ return port;
+
+ return NULL;
+}
+
+static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct mtk_pcie_port *port;
+ u32 bn = bus->number;
+
+ port = mtk_pcie_find_port(bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
+}
+
+static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct mtk_pcie_port *port;
+ u32 bn = bus->number;
+
+ port = mtk_pcie_find_port(bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
+}
+
+static struct pci_ops mtk_pcie_ops_v2 = {
+ .read = mtk_pcie_config_read,
+ .write = mtk_pcie_config_write,
+};
+
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr;
+
+ /* MT2712/MT7622 only support 32-bit MSI addresses */
+ addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+ msg->address_hi = 0;
+ msg->address_lo = lower_32_bits(addr);
+
+ msg->data = data->hwirq;
+
+ dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int mtk_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void mtk_msi_ack_irq(struct irq_data *data)
+{
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ u32 hwirq = data->hwirq;
+
+ writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
+}
+
+static struct irq_chip mtk_msi_bottom_irq_chip = {
+ .name = "MTK MSI",
+ .irq_compose_msi_msg = mtk_compose_msi_msg,
+ .irq_set_affinity = mtk_msi_set_affinity,
+ .irq_ack = mtk_msi_ack_irq,
+};
+
+static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct mtk_pcie_port *port = domain->host_data;
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&port->lock);
+
+ bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
+ if (bit >= MTK_MSI_IRQS_NUM) {
+ mutex_unlock(&port->lock);
+ return -ENOSPC;
+ }
+
+ __set_bit(bit, port->msi_irq_in_use);
+
+ mutex_unlock(&port->lock);
+
+ irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq,
+ NULL, NULL);
+
+ return 0;
+}
+
+static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&port->lock);
+
+ if (!test_bit(d->hwirq, port->msi_irq_in_use))
+ dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ else
+ __clear_bit(d->hwirq, port->msi_irq_in_use);
+
+ mutex_unlock(&port->lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mtk_pcie_irq_domain_alloc,
+ .free = mtk_pcie_irq_domain_free,
+};
+
+static struct irq_chip mtk_msi_irq_chip = {
+ .name = "MTK PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mtk_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &mtk_msi_irq_chip,
+};
+
+static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
+{
+ struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+
+ mutex_init(&port->lock);
+
+ port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
+ &msi_domain_ops, port);
+ if (!port->inner_domain) {
+ dev_err(port->pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
+ port->inner_domain);
+ if (!port->msi_domain) {
+ dev_err(port->pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(port->inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
+{
+ u32 val;
+ phys_addr_t msg_addr;
+
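+	/*
+	 * Endpoints are told (via compose_msi_msg) to write their MSIs to
+	 * the physical address of PCIE_MSI_VECTOR; the same address is
+	 * programmed into PCIE_IMSI_ADDR here.
+	 */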
+ msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+ val = lower_32_bits(msg_addr);
+ writel(val, port->base + PCIE_IMSI_ADDR);
+
+ val = readl(port->base + PCIE_INT_MASK);
+ val &= ~MSI_MASK;
+ writel(val, port->base + PCIE_INT_MASK);
+}
+
+static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
+{
+ struct mtk_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+ if (port->irq_domain)
+ irq_domain_remove(port->irq_domain);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ if (port->msi_domain)
+ irq_domain_remove(port->msi_domain);
+ if (port->inner_domain)
+ irq_domain_remove(port->inner_domain);
+ }
+
+ irq_dispose_mapping(port->irq);
+ }
+}
+
+static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mtk_pcie_intx_map,
+};
+
+static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
+ struct device_node *node)
+{
+ struct device *dev = port->pcie->dev;
+ struct device_node *pcie_intc_node;
+ int ret;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "no PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ of_node_put(pcie_intc_node);
+ if (!port->irq_domain) {
+ dev_err(dev, "failed to get INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ ret = mtk_pcie_allocate_msi_domains(port);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_pcie_intr_handler(struct irq_desc *desc)
+{
+ struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long status;
+ u32 bit = INTX_SHIFT;
+
+ chained_irq_enter(irqchip, desc);
+
+ status = readl(port->base + PCIE_INT_STATUS);
+ if (status & INTX_MASK) {
+ for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
+ /* Clear the INTx */
+ writel(1 << bit, port->base + PCIE_INT_STATUS);
+ generic_handle_domain_irq(port->irq_domain,
+ bit - INTX_SHIFT);
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		if (status & MSI_STATUS) {
+ unsigned long imsi_status;
+
+ /*
+ * The interrupt status can be cleared even if the
+ * MSI status remains pending. As such, given the
+ * edge-triggered interrupt type, its status should
+ * be cleared before being dispatched to the
+ * handler of the underlying device.
+ */
+ writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
+ for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
+ generic_handle_domain_irq(port->inner_domain, bit);
+ }
+ }
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
+ struct device_node *node)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int err;
+
+ err = mtk_pcie_init_irq_domain(port, node);
+ if (err) {
+ dev_err(dev, "failed to init PCIe IRQ domain\n");
+ return err;
+ }
+
+ if (of_property_present(dev->of_node, "interrupt-names"))
+ port->irq = platform_get_irq_byname(pdev, "pcie_irq");
+ else
+ port->irq = platform_get_irq(pdev, port->slot);
+
+ if (port->irq < 0)
+ return port->irq;
+
+ irq_set_chained_handler_and_data(port->irq,
+ mtk_pcie_intr_handler, port);
+
+ return 0;
+}
+
+static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct resource *mem = NULL;
+ struct resource_entry *entry;
+ const struct mtk_pcie_soc *soc = port->pcie->soc;
+ u32 val;
+ int err;
+
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (entry)
+ mem = entry->res;
+ if (!mem)
+ return -EINVAL;
+
+ /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
+ if (pcie->base) {
+ val = readl(pcie->base + PCIE_SYS_CFG_V2);
+ val |= PCIE_CSR_LTSSM_EN(port->slot) |
+ PCIE_CSR_ASPM_L1_EN(port->slot);
+ writel(val, pcie->base + PCIE_SYS_CFG_V2);
+ } else if (pcie->cfg) {
+ val = PCIE_CSR_LTSSM_EN(port->slot) |
+ PCIE_CSR_ASPM_L1_EN(port->slot);
+ regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val);
+ }
+
+ /* Assert all reset signals */
+ writel(0, port->base + PCIE_RST_CTRL);
+
+ /*
+ * Enable PCIe link down reset, if link status changed from link up to
+ * link down, this will reset MAC control registers and configuration
+ * space.
+ */
+ writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+
+ /*
+ * Described in PCIe CEM specification sections 2.2 (PERST# Signal) and
+ * 2.2.1 (Initial Power-Up (G3 to S0)). The deassertion of PERST# should
+ * be delayed 100ms (TPVPERL) for the power and clock to become stable.
+ */
+ msleep(100);
+
+ /* De-assert PHY, PE, PIPE, MAC and configuration reset */
+ val = readl(port->base + PCIE_RST_CTRL);
+ val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
+ PCIE_MAC_SRSTB | PCIE_CRSTB;
+ writel(val, port->base + PCIE_RST_CTRL);
+
+ /* Set up vendor ID and class code */
+ if (soc->need_fix_class_id) {
+ val = PCI_VENDOR_ID_MEDIATEK;
+ writew(val, port->base + PCIE_CONF_VEND_ID);
+
+ val = PCI_CLASS_BRIDGE_PCI;
+ writew(val, port->base + PCIE_CONF_CLASS_ID);
+ }
+
+ if (soc->need_fix_device_id)
+ writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);
+
+ /* 100ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
+ !!(val & PCIE_PORT_LINKUP_V2), 20,
+ 100 * USEC_PER_MSEC);
+ if (err)
+ return -ETIMEDOUT;
+
+ /* Set INTx mask */
+ val = readl(port->base + PCIE_INT_MASK);
+ val &= ~INTX_MASK;
+ writel(val, port->base + PCIE_INT_MASK);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ mtk_pcie_enable_msi(port);
+
+ /* Set AHB to PCIe translation windows */
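+	/*
+	 * AHB2PCIE_SIZE() keeps only the low 5 bits, which carry fls() of
+	 * the window size (log2(size) + 1 for a power-of-two region).
+	 */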
+ val = lower_32_bits(mem->start) |
+ AHB2PCIE_SIZE(fls(resource_size(mem)));
+ writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
+
+ val = upper_32_bits(mem->start);
+ writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
+
+	/* Set PCIe to AXI translation memory space. */
+ val = PCIE2AHB_SIZE | WIN_ENABLE;
+ writel(val, port->base + PCIE_AXI_WINDOW0);
+
+ return 0;
+}
+
+static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct mtk_pcie *pcie = bus->sysdata;
+
+ writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
+ bus->number), pcie->base + PCIE_CFG_ADDR);
+
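+	/* The low two bits of 'where' select the byte lane within the 32-bit data port */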
+ return pcie->base + PCIE_CFG_DATA + (where & 3);
+}
+
+static struct pci_ops mtk_pcie_ops = {
+ .map_bus = mtk_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ u32 func = PCI_FUNC(port->slot);
+ u32 slot = PCI_SLOT(port->slot << 3);
+ u32 val;
+ int err;
+
+ /* assert port PERST_N */
+ val = readl(pcie->base + PCIE_SYS_CFG);
+ val |= PCIE_PORT_PERST(port->slot);
+ writel(val, pcie->base + PCIE_SYS_CFG);
+
+ /* de-assert port PERST_N */
+ val = readl(pcie->base + PCIE_SYS_CFG);
+ val &= ~PCIE_PORT_PERST(port->slot);
+ writel(val, pcie->base + PCIE_SYS_CFG);
+
+ /* 100ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
+ !!(val & PCIE_PORT_LINKUP), 20,
+ 100 * USEC_PER_MSEC);
+ if (err)
+ return -ETIMEDOUT;
+
+ /* enable interrupt */
+ val = readl(pcie->base + PCIE_INT_ENABLE);
+ val |= PCIE_PORT_INT_EN(port->slot);
+ writel(val, pcie->base + PCIE_INT_ENABLE);
+
+	/* Map BAR0 to the whole DDR region; this must be set before any config operation */
+ writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
+ port->base + PCIE_BAR0_SETUP);
+
+ /* configure class code and revision ID */
+ writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);
+
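+	/*
+	 * The port's own config space is accessed indirectly: write the
+	 * target address to PCIE_CFG_ADDR, then read or write PCIE_CFG_DATA.
+	 */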
+ /* configure FC credit */
+ writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
+ pcie->base + PCIE_CFG_ADDR);
+ val = readl(pcie->base + PCIE_CFG_DATA);
+ val &= ~PCIE_FC_CREDIT_MASK;
+ val |= PCIE_FC_CREDIT_VAL(0x806c);
+ writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
+ pcie->base + PCIE_CFG_ADDR);
+ writel(val, pcie->base + PCIE_CFG_DATA);
+
+ /* configure RC FTS number to 250 when it leaves L0s */
+ writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
+ pcie->base + PCIE_CFG_ADDR);
+ val = readl(pcie->base + PCIE_CFG_DATA);
+ val &= ~PCIE_FTS_NUM_MASK;
+ val |= PCIE_FTS_NUM_L0(0x50);
+ writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
+ pcie->base + PCIE_CFG_ADDR);
+ writel(val, pcie->base + PCIE_CFG_DATA);
+
+ return 0;
+}
+
+static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
+ int err;
+
+ err = clk_prepare_enable(port->sys_ck);
+ if (err) {
+ dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
+ goto err_sys_clk;
+ }
+
+ err = clk_prepare_enable(port->ahb_ck);
+ if (err) {
+ dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
+ goto err_ahb_clk;
+ }
+
+ err = clk_prepare_enable(port->aux_ck);
+ if (err) {
+ dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
+ goto err_aux_clk;
+ }
+
+ err = clk_prepare_enable(port->axi_ck);
+ if (err) {
+ dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
+ goto err_axi_clk;
+ }
+
+ err = clk_prepare_enable(port->obff_ck);
+ if (err) {
+ dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
+ goto err_obff_clk;
+ }
+
+ err = clk_prepare_enable(port->pipe_ck);
+ if (err) {
+ dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
+ goto err_pipe_clk;
+ }
+
+ reset_control_assert(port->reset);
+ reset_control_deassert(port->reset);
+
+ err = phy_init(port->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize port%d phy\n", port->slot);
+ goto err_phy_init;
+ }
+
+ err = phy_power_on(port->phy);
+ if (err) {
+ dev_err(dev, "failed to power on port%d phy\n", port->slot);
+ goto err_phy_on;
+ }
+
+ if (!pcie->soc->startup(port))
+ return;
+
+ dev_info(dev, "Port%d link down\n", port->slot);
+
+ phy_power_off(port->phy);
+err_phy_on:
+ phy_exit(port->phy);
+err_phy_init:
+ clk_disable_unprepare(port->pipe_ck);
+err_pipe_clk:
+ clk_disable_unprepare(port->obff_ck);
+err_obff_clk:
+ clk_disable_unprepare(port->axi_ck);
+err_axi_clk:
+ clk_disable_unprepare(port->aux_ck);
+err_aux_clk:
+ clk_disable_unprepare(port->ahb_ck);
+err_ahb_clk:
+ clk_disable_unprepare(port->sys_ck);
+err_sys_clk:
+ mtk_pcie_port_free(port);
+}
+
+static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
+ struct device_node *node,
+ int slot)
+{
+ struct mtk_pcie_port *port;
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ char name[10];
+ int err;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "port%d", slot);
+ port->base = devm_platform_ioremap_resource_byname(pdev, name);
+ if (IS_ERR(port->base)) {
+ dev_err(dev, "failed to map port%d base\n", slot);
+ return PTR_ERR(port->base);
+ }
+
+ snprintf(name, sizeof(name), "sys_ck%d", slot);
+ port->sys_ck = devm_clk_get(dev, name);
+ if (IS_ERR(port->sys_ck)) {
+ dev_err(dev, "failed to get sys_ck%d clock\n", slot);
+ return PTR_ERR(port->sys_ck);
+ }
+
+ /* sys_ck might be divided into the following parts in some chips */
+ snprintf(name, sizeof(name), "ahb_ck%d", slot);
+ port->ahb_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->ahb_ck))
+ return PTR_ERR(port->ahb_ck);
+
+ snprintf(name, sizeof(name), "axi_ck%d", slot);
+ port->axi_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->axi_ck))
+ return PTR_ERR(port->axi_ck);
+
+ snprintf(name, sizeof(name), "aux_ck%d", slot);
+ port->aux_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->aux_ck))
+ return PTR_ERR(port->aux_ck);
+
+ snprintf(name, sizeof(name), "obff_ck%d", slot);
+ port->obff_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->obff_ck))
+ return PTR_ERR(port->obff_ck);
+
+ snprintf(name, sizeof(name), "pipe_ck%d", slot);
+ port->pipe_ck = devm_clk_get_optional(dev, name);
+ if (IS_ERR(port->pipe_ck))
+ return PTR_ERR(port->pipe_ck);
+
+ snprintf(name, sizeof(name), "pcie-rst%d", slot);
+ port->reset = devm_reset_control_get_optional_exclusive(dev, name);
+ if (PTR_ERR(port->reset) == -EPROBE_DEFER)
+ return PTR_ERR(port->reset);
+
+	/* some platforms may use the default PHY setting */
+ snprintf(name, sizeof(name), "pcie-phy%d", slot);
+ port->phy = devm_phy_optional_get(dev, name);
+ if (IS_ERR(port->phy))
+ return PTR_ERR(port->phy);
+
+ port->slot = slot;
+ port->pcie = pcie;
+
+ if (pcie->soc->setup_irq) {
+ err = pcie->soc->setup_irq(port, node);
+ if (err)
+ return err;
+ }
+
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
+static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *regs;
+ struct device_node *cfg_node;
+ int err;
+
+ /* get shared registers, which are optional */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
+ if (regs) {
+ pcie->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+ }
+
+ cfg_node = of_find_compatible_node(NULL, NULL,
+ "mediatek,generic-pciecfg");
+ if (cfg_node) {
+ pcie->cfg = syscon_node_to_regmap(cfg_node);
+ of_node_put(cfg_node);
+ if (IS_ERR(pcie->cfg))
+ return PTR_ERR(pcie->cfg);
+ }
+
+ pcie->free_ck = devm_clk_get(dev, "free_ck");
+ if (IS_ERR(pcie->free_ck)) {
+ if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ pcie->free_ck = NULL;
+ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ /* enable top level clock */
+ err = clk_prepare_enable(pcie->free_ck);
+ if (err) {
+ dev_err(dev, "failed to enable free_ck\n");
+ goto err_free_ck;
+ }
+
+ return 0;
+
+err_free_ck:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return err;
+}
+
+static int mtk_pcie_setup(struct mtk_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node, *child;
+ struct mtk_pcie_port *port, *tmp;
+ int err, slot;
+
+ slot = of_get_pci_domain_nr(dev->of_node);
+ if (slot < 0) {
+ for_each_available_child_of_node(node, child) {
+ err = of_pci_get_devfn(child);
+ if (err < 0) {
+ dev_err(dev, "failed to get devfn: %d\n", err);
+ goto error_put_node;
+ }
+
+ slot = PCI_SLOT(err);
+
+ err = mtk_pcie_parse_port(pcie, child, slot);
+ if (err)
+ goto error_put_node;
+ }
+ } else {
+ err = mtk_pcie_parse_port(pcie, node, slot);
+ if (err)
+ return err;
+ }
+
+ err = mtk_pcie_subsys_powerup(pcie);
+ if (err)
+ return err;
+
+ /* enable each port, and then check link status */
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ mtk_pcie_enable_port(port);
+
+ /* power down PCIe subsys if slots are all empty (link down) */
+ if (list_empty(&pcie->ports))
+ mtk_pcie_subsys_powerdown(pcie);
+
+ return 0;
+error_put_node:
+ of_node_put(child);
+ return err;
+}
+
+static int mtk_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_pcie *pcie;
+ struct pci_host_bridge *host;
+ int err;
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!host)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(host);
+
+ pcie->dev = dev;
+ pcie->soc = of_device_get_match_data(dev);
+ platform_set_drvdata(pdev, pcie);
+ INIT_LIST_HEAD(&pcie->ports);
+
+ err = mtk_pcie_setup(pcie);
+ if (err)
+ return err;
+
+ host->ops = pcie->soc->ops;
+ host->sysdata = pcie;
+ host->msi_domain = pcie->soc->no_msi;
+
+ err = pci_host_probe(host);
+ if (err)
+ goto put_resources;
+
+ return 0;
+
+put_resources:
+ if (!list_empty(&pcie->ports))
+ mtk_pcie_put_resources(pcie);
+
+ return err;
+}
+
+static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct list_head *windows = &host->windows;
+
+ pci_free_resource_list(windows);
+}
+
+static void mtk_pcie_remove(struct platform_device *pdev)
+{
+ struct mtk_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+ pci_stop_root_bus(host->bus);
+ pci_remove_root_bus(host->bus);
+ mtk_pcie_free_resources(pcie);
+
+ mtk_pcie_irq_teardown(pcie);
+
+ mtk_pcie_put_resources(pcie);
+}
+
+static int mtk_pcie_suspend_noirq(struct device *dev)
+{
+ struct mtk_pcie *pcie = dev_get_drvdata(dev);
+ struct mtk_pcie_port *port;
+
+ if (list_empty(&pcie->ports))
+ return 0;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ clk_disable_unprepare(port->pipe_ck);
+ clk_disable_unprepare(port->obff_ck);
+ clk_disable_unprepare(port->axi_ck);
+ clk_disable_unprepare(port->aux_ck);
+ clk_disable_unprepare(port->ahb_ck);
+ clk_disable_unprepare(port->sys_ck);
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ }
+
+ clk_disable_unprepare(pcie->free_ck);
+
+ return 0;
+}
+
+static int mtk_pcie_resume_noirq(struct device *dev)
+{
+ struct mtk_pcie *pcie = dev_get_drvdata(dev);
+ struct mtk_pcie_port *port, *tmp;
+
+ if (list_empty(&pcie->ports))
+ return 0;
+
+ clk_prepare_enable(pcie->free_ck);
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ mtk_pcie_enable_port(port);
+
+	/* In case the EP was removed while the system was suspended */
+ if (list_empty(&pcie->ports))
+ clk_disable_unprepare(pcie->free_ck);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
+ .no_msi = true,
+ .ops = &mtk_pcie_ops,
+ .startup = mtk_pcie_startup_port,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_v2,
+ .setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
+ .need_fix_class_id = true,
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_v2,
+ .setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
+ .need_fix_class_id = true,
+ .need_fix_device_id = true,
+ .device_id = PCI_DEVICE_ID_MEDIATEK_7629,
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_v2,
+ .setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct of_device_id mtk_pcie_ids[] = {
+ { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
+ { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
+ { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
+ { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
+ { .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_pcie_ids);
+
+static struct platform_driver mtk_pcie_driver = {
+ .probe = mtk_pcie_probe,
+ .remove_new = mtk_pcie_remove,
+ .driver = {
+ .name = "mtk-pcie",
+ .of_match_table = mtk_pcie_ids,
+ .suppress_bind_attrs = true,
+ .pm = &mtk_pcie_pm_ops,
+ },
+};
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
new file mode 100644
index 000000000..137fb8570
--- /dev/null
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -0,0 +1,1216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip AXI PCIe Bridge host controller driver
+ *
+ * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+#include "../pci.h"
+
+/* Number of MSI IRQs */
+#define MC_MAX_NUM_MSI_IRQS 32
+
+/* PCIe Bridge Phy and Controller Phy offsets */
+#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
+#define MC_PCIE1_CTRL_ADDR 0x0000a000u
+
+#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
+#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)
+
+/* PCIe Bridge Phy Regs */
+#define PCIE_PCI_IRQ_DW0 0xa8
+#define MSIX_CAP_MASK BIT(31)
+#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
+#define NUM_MSI_MSGS_SHIFT 4
+
+#define IMASK_LOCAL 0x180
+#define DMA_END_ENGINE_0_MASK 0x00000000u
+#define DMA_END_ENGINE_0_SHIFT 0
+#define DMA_END_ENGINE_1_MASK 0x00000000u
+#define DMA_END_ENGINE_1_SHIFT 1
+#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
+#define DMA_ERROR_ENGINE_0_SHIFT 8
+#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
+#define DMA_ERROR_ENGINE_1_SHIFT 9
+#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
+#define A_ATR_EVT_POST_ERR_SHIFT 16
+#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
+#define A_ATR_EVT_FETCH_ERR_SHIFT 17
+#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
+#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
+#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define A_ATR_EVT_DOORBELL_SHIFT 19
+#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
+#define P_ATR_EVT_POST_ERR_SHIFT 20
+#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
+#define P_ATR_EVT_FETCH_ERR_SHIFT 21
+#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
+#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
+#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define P_ATR_EVT_DOORBELL_SHIFT 23
+#define PM_MSI_INT_INTA_MASK 0x01000000u
+#define PM_MSI_INT_INTA_SHIFT 24
+#define PM_MSI_INT_INTB_MASK 0x02000000u
+#define PM_MSI_INT_INTB_SHIFT 25
+#define PM_MSI_INT_INTC_MASK 0x04000000u
+#define PM_MSI_INT_INTC_SHIFT 26
+#define PM_MSI_INT_INTD_MASK 0x08000000u
+#define PM_MSI_INT_INTD_SHIFT 27
+#define PM_MSI_INT_INTX_MASK 0x0f000000u
+#define PM_MSI_INT_INTX_SHIFT 24
+#define PM_MSI_INT_MSI_MASK 0x10000000u
+#define PM_MSI_INT_MSI_SHIFT 28
+#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
+#define PM_MSI_INT_AER_EVT_SHIFT 29
+#define PM_MSI_INT_EVENTS_MASK 0x40000000u
+#define PM_MSI_INT_EVENTS_SHIFT 30
+#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
+#define PM_MSI_INT_SYS_ERR_SHIFT 31
+#define NUM_LOCAL_EVENTS 15
+#define ISTATUS_LOCAL 0x184
+#define IMASK_HOST 0x188
+#define ISTATUS_HOST 0x18c
+#define IMSI_ADDR 0x190
+#define ISTATUS_MSI 0x194
+
+/* PCIe Master table init defines */
+#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
+#define ATR0_PCIE_ATR_SIZE 0x25
+#define ATR0_PCIE_ATR_SIZE_SHIFT 1
+#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
+#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
+
+/* PCIe AXI slave table init defines */
+#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
+#define ATR_SIZE_SHIFT 1
+#define ATR_IMPL_ENABLE 1
+#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
+#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
+#define PCIE_TX_RX_INTERFACE 0x00000000u
+#define PCIE_CONFIG_INTERFACE 0x00000001u
+
+#define ATR_ENTRY_SIZE 32
+
+/* PCIe Controller Phy Regs */
+#define SEC_ERROR_EVENT_CNT 0x20
+#define DED_ERROR_EVENT_CNT 0x24
+#define SEC_ERROR_INT 0x28
+#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
+#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
+#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
+#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
+#define SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT GENMASK(15, 0)
+#define NUM_SEC_ERROR_INTS (4)
+#define SEC_ERROR_INT_MASK 0x2c
+#define DED_ERROR_INT 0x30
+#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
+#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
+#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
+#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
+#define DED_ERROR_INT_ALL_RAM_DED_ERR_INT GENMASK(15, 0)
+#define NUM_DED_ERROR_INTS (4)
+#define DED_ERROR_INT_MASK 0x34
+#define ECC_CONTROL 0x38
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
+#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
+#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
+#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
+#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
+#define PCIE_EVENT_INT 0x14c
+#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
+#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
+#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
+#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
+#define PCIE_EVENT_INT_ENB_SHIFT 16
+#define NUM_PCIE_EVENTS (3)
+
+/* PCIe Config space MSI capability structure */
+#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
+
+/* Events */
+#define EVENT_PCIE_L2_EXIT 0
+#define EVENT_PCIE_HOTRST_EXIT 1
+#define EVENT_PCIE_DLUP_EXIT 2
+#define EVENT_SEC_TX_RAM_SEC_ERR 3
+#define EVENT_SEC_RX_RAM_SEC_ERR 4
+#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5
+#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6
+#define EVENT_DED_TX_RAM_DED_ERR 7
+#define EVENT_DED_RX_RAM_DED_ERR 8
+#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9
+#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 10
+#define EVENT_LOCAL_DMA_END_ENGINE_0 11
+#define EVENT_LOCAL_DMA_END_ENGINE_1 12
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
+#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
+#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
+#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
+#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
+#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
+#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
+#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
+#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
+#define EVENT_LOCAL_PM_MSI_INT_INTX 23
+#define EVENT_LOCAL_PM_MSI_INT_MSI 24
+#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
+#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
+#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
+#define NUM_EVENTS 28
+
+#define PCIE_EVENT_CAUSE(x, s) \
+ [EVENT_PCIE_ ## x] = { __stringify(x), s }
+
+#define SEC_ERROR_CAUSE(x, s) \
+ [EVENT_SEC_ ## x] = { __stringify(x), s }
+
+#define DED_ERROR_CAUSE(x, s) \
+ [EVENT_DED_ ## x] = { __stringify(x), s }
+
+#define LOCAL_EVENT_CAUSE(x, s) \
+ [EVENT_LOCAL_ ## x] = { __stringify(x), s }
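+/*
+ * e.g. PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event") expands to
+ * [EVENT_PCIE_L2_EXIT] = { "L2_EXIT", "L2 exit event" }
+ */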
+
+#define PCIE_EVENT(x) \
+ .base = MC_PCIE_CTRL_ADDR, \
+ .offset = PCIE_EVENT_INT, \
+ .mask_offset = PCIE_EVENT_INT, \
+ .mask_high = 1, \
+ .mask = PCIE_EVENT_INT_ ## x ## _INT, \
+ .enb_mask = PCIE_EVENT_INT_ENB_MASK
+
+#define SEC_EVENT(x) \
+ .base = MC_PCIE_CTRL_ADDR, \
+ .offset = SEC_ERROR_INT, \
+ .mask_offset = SEC_ERROR_INT_MASK, \
+ .mask = SEC_ERROR_INT_ ## x ## _INT, \
+ .mask_high = 1, \
+ .enb_mask = 0
+
+#define DED_EVENT(x) \
+ .base = MC_PCIE_CTRL_ADDR, \
+ .offset = DED_ERROR_INT, \
+ .mask_offset = DED_ERROR_INT_MASK, \
+ .mask_high = 1, \
+ .mask = DED_ERROR_INT_ ## x ## _INT, \
+ .enb_mask = 0
+
+#define LOCAL_EVENT(x) \
+ .base = MC_PCIE_BRIDGE_ADDR, \
+ .offset = ISTATUS_LOCAL, \
+ .mask_offset = IMASK_LOCAL, \
+ .mask_high = 0, \
+ .mask = x ## _MASK, \
+ .enb_mask = 0
+
+#define PCIE_EVENT_TO_EVENT_MAP(x) \
+ { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }
+
+#define SEC_ERROR_TO_EVENT_MAP(x) \
+ { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }
+
+#define DED_ERROR_TO_EVENT_MAP(x) \
+ { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }
+
+#define LOCAL_STATUS_TO_EVENT_MAP(x) \
+ { x ## _MASK, EVENT_LOCAL_ ## x }
+
+struct event_map {
+ u32 reg_mask;
+ u32 event_bit;
+};
+
+struct mc_msi {
+ struct mutex lock; /* Protect used bitmap */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ u32 num_vectors;
+ u64 vector_phy;
+ DECLARE_BITMAP(used, MC_MAX_NUM_MSI_IRQS);
+};
+
+struct mc_pcie {
+ void __iomem *axi_base_addr;
+ struct device *dev;
+ struct irq_domain *intx_domain;
+ struct irq_domain *event_domain;
+ raw_spinlock_t lock;
+ struct mc_msi msi;
+};
+
+struct cause {
+ const char *sym;
+ const char *str;
+};
+
+static const struct cause event_cause[NUM_EVENTS] = {
+ PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
+ PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
+ PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
+ SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
+ SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
+ SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
+ SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
+ DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
+ DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
+ DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
+ DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
+};
+
+static struct event_map pcie_event_to_event[] = {
+ PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
+};
+
+static struct event_map sec_error_to_event[] = {
+ SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
+};
+
+static struct event_map ded_error_to_event[] = {
+ DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
+};
+
+static struct event_map local_status_to_event[] = {
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
+};
+
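+/*
+ * Per-event register description. mask_high selects the mask polarity:
+ * when set, a 1 in the mask register disables the event (SEC/DED/PCIE
+ * event registers); when clear, a set bit in IMASK_LOCAL enables it.
+ */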
+static struct {
+ u32 base;
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 enb_mask;
+ u32 mask_high;
+ u32 mask_offset;
+} event_descs[] = {
+ { PCIE_EVENT(L2_EXIT) },
+ { PCIE_EVENT(HOTRST_EXIT) },
+ { PCIE_EVENT(DLUP_EXIT) },
+ { SEC_EVENT(TX_RAM_SEC_ERR) },
+ { SEC_EVENT(RX_RAM_SEC_ERR) },
+ { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
+ { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
+ { DED_EVENT(TX_RAM_DED_ERR) },
+ { DED_EVENT(RX_RAM_DED_ERR) },
+ { DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
+ { DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
+ { LOCAL_EVENT(DMA_END_ENGINE_0) },
+ { LOCAL_EVENT(DMA_END_ENGINE_1) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
+ { LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(PM_MSI_INT_INTX) },
+ { LOCAL_EVENT(PM_MSI_INT_MSI) },
+ { LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
+ { LOCAL_EVENT(PM_MSI_INT_EVENTS) },
+ { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
+};
+
+static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
+
+static struct mc_pcie *port;
+
+static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
+{
+ struct mc_msi *msi = &port->msi;
+ u16 reg;
+ u8 queue_size;
+
+ /* Fixup MSI enable flag */
+ reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+ reg |= PCI_MSI_FLAGS_ENABLE;
+ writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+
+	/* Fixup PCI MSI queue flags: set Multiple Message Enable to match Multiple Message Capable */
+ queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg);
+ reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size);
+ writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+
+ /* Fixup MSI addr fields */
+ writel_relaxed(lower_32_bits(msi->vector_phy),
+ ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO);
+ writel_relaxed(upper_32_bits(msi->vector_phy),
+ ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
+}
+
+static void mc_handle_msi(struct irq_desc *desc)
+{
+ struct mc_pcie *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ struct mc_msi *msi = &port->msi;
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_MSI_MASK) {
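+		/* Ack the aggregate MSI bit first so a late-arriving MSI re-pends it */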
+ writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
+ status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
+ for_each_set_bit(bit, &status, msi->num_vectors) {
+ ret = generic_handle_domain_irq(msi->dev_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void mc_msi_bottom_irq_ack(struct irq_data *data)
+{
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ u32 bitpos = data->hwirq;
+
+ writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
+}
+
+static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = port->msi.vector_phy;
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int mc_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
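+	/* MSI vectors are dispatched via the chained event handler; per-vector affinity is not supported */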
+ return -EINVAL;
+}
+
+static struct irq_chip mc_msi_bottom_irq_chip = {
+ .name = "Microchip MSI",
+ .irq_ack = mc_msi_bottom_irq_ack,
+ .irq_compose_msi_msg = mc_compose_msi_msg,
+ .irq_set_affinity = mc_msi_set_affinity,
+};
+
+static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct mc_pcie *port = domain->host_data;
+ struct mc_msi *msi = &port->msi;
+ unsigned long bit;
+
+ mutex_lock(&msi->lock);
+ bit = find_first_zero_bit(msi->used, msi->num_vectors);
+ if (bit >= msi->num_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->used);
+
+ irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq, NULL, NULL);
+
+ mutex_unlock(&msi->lock);
+
+ return 0;
+}
+
+static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(d);
+ struct mc_msi *msi = &port->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (test_bit(d->hwirq, msi->used))
+ __clear_bit(d->hwirq, msi->used);
+ else
+ dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mc_irq_msi_domain_alloc,
+ .free = mc_irq_msi_domain_free,
+};
+
+static struct irq_chip mc_msi_irq_chip = {
+ .name = "Microchip PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mc_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &mc_msi_irq_chip,
+};
+
+static int mc_allocate_msi_domains(struct mc_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct mc_msi *msi = &port->msi;
+
+ mutex_init(&port->msi.lock);
+
+ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
+ &msi_domain_ops, port);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mc_handle_intx(struct irq_desc *desc)
+{
+ struct mc_pcie *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_INTX_MASK) {
+ status &= PM_MSI_INT_INTX_MASK;
+ status >>= PM_MSI_INT_INTX_SHIFT;
+ for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+ ret = generic_handle_domain_irq(port->intx_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void mc_ack_intx_irq(struct irq_data *data)
+{
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+
+ writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
+}
+
+static void mc_mask_intx_irq(struct irq_data *data)
+{
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void mc_unmask_intx_irq(struct irq_data *data)
+{
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip mc_intx_irq_chip = {
+ .name = "Microchip PCIe INTx",
+ .irq_ack = mc_ack_intx_irq,
+ .irq_mask = mc_mask_intx_irq,
+ .irq_unmask = mc_unmask_intx_irq,
+};
+
+static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mc_pcie_intx_map,
+};
+
+static inline u32 reg_to_event(u32 reg, struct event_map field)
+{
+ return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
+}
+
+static u32 pcie_events(struct mc_pcie *port)
+{
+ void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+ u32 reg = readl_relaxed(ctrl_base_addr + PCIE_EVENT_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
+ val |= reg_to_event(reg, pcie_event_to_event[i]);
+
+ return val;
+}
+
+static u32 sec_errors(struct mc_pcie *port)
+{
+ void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+ u32 reg = readl_relaxed(ctrl_base_addr + SEC_ERROR_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
+ val |= reg_to_event(reg, sec_error_to_event[i]);
+
+ return val;
+}
+
+static u32 ded_errors(struct mc_pcie *port)
+{
+ void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+ u32 reg = readl_relaxed(ctrl_base_addr + DED_ERROR_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
+ val |= reg_to_event(reg, ded_error_to_event[i]);
+
+ return val;
+}
+
+static u32 local_events(struct mc_pcie *port)
+{
+ void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ u32 reg = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
+ val |= reg_to_event(reg, local_status_to_event[i]);
+
+ return val;
+}
+
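+/* Gather pending events from all four status registers into a single event bitmap */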
+static u32 get_events(struct mc_pcie *port)
+{
+ u32 events = 0;
+
+ events |= pcie_events(port);
+ events |= sec_errors(port);
+ events |= ded_errors(port);
+ events |= local_events(port);
+
+ return events;
+}
+
+static irqreturn_t mc_event_handler(int irq, void *dev_id)
+{
+ struct mc_pcie *port = dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *data;
+
+ data = irq_domain_get_irq_data(port->event_domain, irq);
+
+ if (event_cause[data->hwirq].str)
+ dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
+ else
+ dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static void mc_handle_event(struct irq_desc *desc)
+{
+ struct mc_pcie *port = irq_desc_get_handler_data(desc);
+ unsigned long events;
+ u32 bit;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ events = get_events(port);
+
+ for_each_set_bit(bit, &events, NUM_EVENTS)
+ generic_handle_domain_irq(port->event_domain, bit);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void mc_ack_event_irq(struct irq_data *data)
+{
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+
+ addr = port->axi_base_addr + event_descs[event].base +
+ event_descs[event].offset;
+ mask = event_descs[event].mask;
+ mask |= event_descs[event].enb_mask;
+
+ writel_relaxed(mask, addr);
+}
+
+static void mc_mask_event_irq(struct irq_data *data)
+{
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ addr = port->axi_base_addr + event_descs[event].base +
+ event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+ if (event_descs[event].enb_mask) {
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+ }
+
+ if (!event_descs[event].mask_high)
+ mask = ~mask;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val |= mask;
+ else
+ val &= mask;
+
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static void mc_unmask_event_irq(struct irq_data *data)
+{
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ addr = port->axi_base_addr + event_descs[event].base +
+ event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+
+ if (event_descs[event].enb_mask)
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+
+ if (event_descs[event].mask_high)
+ mask = ~mask;
+
+ if (event_descs[event].enb_mask)
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val &= mask;
+ else
+ val |= mask;
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip mc_event_irq_chip = {
+ .name = "Microchip PCIe EVENT",
+ .irq_ack = mc_ack_event_irq,
+ .irq_mask = mc_mask_event_irq,
+ .irq_unmask = mc_unmask_event_irq,
+};
+
+static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = mc_pcie_event_map,
+};
+
+static inline void mc_pcie_deinit_clk(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+ if (!clk)
+ return clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk);
+
+ return clk;
+}
+
+static int mc_pcie_init_clks(struct device *dev)
+{
+ int i;
+ struct clk *fic;
+
+ /*
+	 * PCIe may be clocked via the fabric interface using between one and
+	 * four clocks. Scan the DT for these clocks and enable any that are
+	 * present.
+ */
+ for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
+ fic = mc_pcie_init_clk(dev, poss_clks[i]);
+ if (IS_ERR(fic))
+ return PTR_ERR(fic);
+ }
+
+ return 0;
+}
+
+static int mc_pcie_init_irq_domains(struct mc_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "failed to find PCIe Intc node\n");
+ return -EINVAL;
+ }
+
+ port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
+ &event_domain_ops, port);
+ if (!port->event_domain) {
+ dev_err(dev, "failed to get event domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "failed to get an INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return mc_allocate_msi_domains(port);
+}
+
+static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size)
+{
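+	/* The ATR SIZE field encodes the translation window size as log2(size) - 1 */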
+ u32 atr_sz = ilog2(size) - 1;
+ u32 val;
+
+ if (index == 0)
+ val = PCIE_CONFIG_INTERFACE;
+ else
+ val = PCIE_TX_RX_INTERFACE;
+
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_PARAM);
+
+ val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
+ ATR_IMPL_ENABLE;
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRCADDR_PARAM);
+
+ val = upper_32_bits(axi_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRC_ADDR);
+
+ val = lower_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
+
+ val = upper_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
+
+ val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
+ writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+}
+
+static int mc_pcie_setup_windows(struct platform_device *pdev,
+ struct mc_pcie *port)
+{
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
+ struct resource_entry *entry;
+ u64 pci_addr;
+ u32 index = 1;
+
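+	/* Window 0 is used for config space (see mc_platform_init), so data windows start at 1 */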
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ pci_addr = entry->res->start - entry->offset;
+ mc_pcie_setup_window(bridge_base_addr, index,
+ entry->res->start, pci_addr,
+ resource_size(entry->res));
+ index++;
+ }
+ }
+
+ return 0;
+}
+
+static inline void mc_clear_secs(struct mc_pcie *port)
+{
+ void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+
+ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
+ SEC_ERROR_INT);
+ writel_relaxed(0, ctrl_base_addr + SEC_ERROR_EVENT_CNT);
+}
+
+static inline void mc_clear_deds(struct mc_pcie *port)
+{
+ void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+
+ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
+ DED_ERROR_INT);
+ writel_relaxed(0, ctrl_base_addr + DED_ERROR_EVENT_CNT);
+}
+
+static void mc_disable_interrupts(struct mc_pcie *port)
+{
+ void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
+ u32 val;
+
+ /* Ensure ECC bypass is enabled */
+ val = ECC_CONTROL_TX_RAM_ECC_BYPASS |
+ ECC_CONTROL_RX_RAM_ECC_BYPASS |
+ ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS |
+ ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS;
+ writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);
+
+ /* Disable SEC errors and clear any outstanding */
+ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
+ SEC_ERROR_INT_MASK);
+ mc_clear_secs(port);
+
+ /* Disable DED errors and clear any outstanding */
+ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
+ DED_ERROR_INT_MASK);
+ mc_clear_deds(port);
+
+ /* Disable local interrupts and clear any outstanding */
+ writel_relaxed(0, bridge_base_addr + IMASK_LOCAL);
+ writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_LOCAL);
+ writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_MSI);
+
+ /* Disable PCIe events and clear any outstanding */
+ val = PCIE_EVENT_INT_L2_EXIT_INT |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT |
+ PCIE_EVENT_INT_DLUP_EXIT_INT |
+ PCIE_EVENT_INT_L2_EXIT_INT_MASK |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK |
+ PCIE_EVENT_INT_DLUP_EXIT_INT_MASK;
+ writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);
+
+ /* Disable host interrupts and clear any outstanding */
+ writel_relaxed(0, bridge_base_addr + IMASK_HOST);
+ writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
+}
+
+static int mc_init_interrupts(struct platform_device *pdev, struct mc_pcie *port)
+{
+ struct device *dev = &pdev->dev;
+ int irq;
+ int i, intx_irq, msi_irq, event_irq;
+ int ret;
+
+ ret = mc_pcie_init_irq_domains(port);
+ if (ret) {
+ dev_err(dev, "failed creating IRQ domains\n");
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENODEV;
+
+ for (i = 0; i < NUM_EVENTS; i++) {
+ event_irq = irq_create_mapping(port->event_domain, i);
+ if (!event_irq) {
+ dev_err(dev, "failed to map hwirq %d\n", i);
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(dev, event_irq, mc_event_handler,
+ 0, event_cause[i].sym, port);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %d\n", event_irq);
+ return ret;
+ }
+ }
+
+ intx_irq = irq_create_mapping(port->event_domain,
+ EVENT_LOCAL_PM_MSI_INT_INTX);
+ if (!intx_irq) {
+ dev_err(dev, "failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ /* Plug the INTx chained handler */
+ irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);
+
+ msi_irq = irq_create_mapping(port->event_domain,
+ EVENT_LOCAL_PM_MSI_INT_MSI);
+ if (!msi_irq)
+ return -ENXIO;
+
+ /* Plug the MSI chained handler */
+ irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);
+
+ /* Plug the main event chained handler */
+ irq_set_chained_handler_and_data(irq, mc_handle_event, port);
+
+ return 0;
+}
+
+static int mc_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ void __iomem *bridge_base_addr =
+ port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ int ret;
+
+ /* Configure address translation table 0 for PCIe config space */
+ mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start,
+ cfg->res.start,
+ resource_size(&cfg->res));
+
+ /* Need some fixups in config space */
+ mc_pcie_enable_msi(port, cfg->win);
+
+ /* Configure non-config space outbound ranges */
+ ret = mc_pcie_setup_windows(pdev, port);
+ if (ret)
+ return ret;
+
+ /* Address translation is up; safe to enable interrupts */
+ ret = mc_init_interrupts(pdev, port);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mc_host_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *bridge_base_addr;
+ int ret;
+ u32 val;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = dev;
+
+ port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(port->axi_base_addr))
+ return PTR_ERR(port->axi_base_addr);
+
+ mc_disable_interrupts(port);
+
+ bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+
+ /* Allow enabling MSI by disabling MSI-X */
+ val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ val &= ~MSIX_CAP_MASK;
+ writel(val, bridge_base_addr + PCIE_PCI_IRQ_DW0);
+
+ /* Pick num vectors from bitfile programmed onto FPGA fabric */
+ val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ val &= NUM_MSI_MSGS_MASK;
+ val >>= NUM_MSI_MSGS_SHIFT;
+
+ port->msi.num_vectors = 1 << val;
+
+ /* Pick vector address from design */
+ port->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR);
+
+ ret = mc_pcie_init_clks(dev);
+ if (ret) {
+ dev_err(dev, "failed to get clock resources, error %d\n", ret);
+ return -ENODEV;
+ }
+
+ return pci_host_common_probe(pdev);
+}
+
+static const struct pci_ecam_ops mc_ecam_ops = {
+ .init = mc_platform_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static const struct of_device_id mc_pcie_of_match[] = {
+ {
+ .compatible = "microchip,pcie-host-1.0",
+ .data = &mc_ecam_ops,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
+
+static struct platform_driver mc_pcie_driver = {
+ .probe = mc_host_probe,
+ .driver = {
+ .name = "microchip-pcie",
+ .of_match_table = mc_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mc_pcie_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microchip PCIe host controller driver");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
new file mode 100644
index 000000000..79e225edb
--- /dev/null
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * BRIEF MODULE DESCRIPTION
+ * PCI init for Ralink RT2880 solution
+ *
+ * Copyright 2007 Ralink Inc. (bruce_chang@ralinktech.com.tw)
+ *
+ * May 2007 Bruce Chang
+ * Initial Release
+ *
+ * May 2009 Bruce Chang
+ * support RT2880/RT3883 PCIe
+ *
+ * May 2011 Bruce Chang
+ * support RT6855/MT7620 PCIe
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sys_soc.h>
+
+#include "../pci.h"
+
+/* MediaTek-specific configuration registers */
+#define PCIE_FTS_NUM 0x70c
+#define PCIE_FTS_NUM_MASK GENMASK(15, 8)
+#define PCIE_FTS_NUM_L0(x) (((x) & 0xff) << 8)
+
+/* Host-PCI bridge registers */
+#define RALINK_PCI_PCICFG_ADDR 0x0000
+#define RALINK_PCI_PCIMSK_ADDR 0x000c
+#define RALINK_PCI_CONFIG_ADDR 0x0020
+#define RALINK_PCI_CONFIG_DATA 0x0024
+#define RALINK_PCI_MEMBASE 0x0028
+#define RALINK_PCI_IOBASE 0x002c
+
+/* PCIe RC control registers */
+#define RALINK_PCI_ID 0x0030
+#define RALINK_PCI_CLASS 0x0034
+#define RALINK_PCI_SUBID 0x0038
+#define RALINK_PCI_STATUS 0x0050
+
+/* Some definition values */
+#define PCIE_REVISION_ID BIT(0)
+#define PCIE_CLASS_CODE (0x60400 << 8)
+#define PCIE_BAR_MAP_MAX GENMASK(30, 16)
+#define PCIE_BAR_ENABLE BIT(0)
+#define PCIE_PORT_INT_EN(x) BIT(20 + (x))
+#define PCIE_PORT_LINKUP BIT(0)
+#define PCIE_PORT_CNT 3
+
+#define INIT_PORTS_DELAY_MS 100
+#define PERST_DELAY_MS 100
+
+/**
+ * struct mt7621_pcie_port - PCIe port information
+ * @base: I/O mapped register base
+ * @list: port list
+ * @pcie: pointer to PCIe host info
+ * @clk: pointer to the port clock gate
+ * @phy: pointer to PHY control block
+ * @pcie_rst: pointer to port reset control
+ * @gpio_rst: gpio reset
+ * @slot: port slot
+ * @enabled: indicates if port is enabled
+ */
+struct mt7621_pcie_port {
+ void __iomem *base;
+ struct list_head list;
+ struct mt7621_pcie *pcie;
+ struct clk *clk;
+ struct phy *phy;
+ struct reset_control *pcie_rst;
+ struct gpio_desc *gpio_rst;
+ u32 slot;
+ bool enabled;
+};
+
+/**
+ * struct mt7621_pcie - PCIe host information
+ * @dev: pointer to PCIe device
+ * @base: I/O mapped register base
+ * @ports: pointer to PCIe port information
+ * @resets_inverted: reset lines are inverted, depending on chip revision
+ */
+struct mt7621_pcie {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head ports;
+ bool resets_inverted;
+};
+
+static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->base + reg);
+}
+
+static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->base + reg);
+}
+
+static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg)
+{
+ return readl_relaxed(port->base + reg);
+}
+
+static inline void pcie_port_write(struct mt7621_pcie_port *port,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, port->base + reg);
+}
+
+static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct mt7621_pcie *pcie = bus->sysdata;
+ u32 address = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
+
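+	/* The data port is 32 bits wide; sub-word accesses are offset into it via "where" */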
+ return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3);
+}
+
+static struct pci_ops mt7621_pcie_ops = {
+ .map_bus = mt7621_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
+{
+ u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg);
+
+ pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
+ return pcie_read(pcie, RALINK_PCI_CONFIG_DATA);
+}
+
+static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
+ u32 reg, u32 val)
+{
+ u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg);
+
+ pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
+ pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
+}
+
+static inline void mt7621_rst_gpio_pcie_assert(struct mt7621_pcie_port *port)
+{
+ if (port->gpio_rst)
+ gpiod_set_value(port->gpio_rst, 1);
+}
+
+static inline void mt7621_rst_gpio_pcie_deassert(struct mt7621_pcie_port *port)
+{
+ if (port->gpio_rst)
+ gpiod_set_value(port->gpio_rst, 0);
+}
+
+static inline bool mt7621_pcie_port_is_linkup(struct mt7621_pcie_port *port)
+{
+ return (pcie_port_read(port, RALINK_PCI_STATUS) & PCIE_PORT_LINKUP) != 0;
+}
+
+static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
+{
+ struct mt7621_pcie *pcie = port->pcie;
+
+ if (pcie->resets_inverted)
+ reset_control_assert(port->pcie_rst);
+ else
+ reset_control_deassert(port->pcie_rst);
+}
+
+static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
+{
+ struct mt7621_pcie *pcie = port->pcie;
+
+ if (pcie->resets_inverted)
+ reset_control_deassert(port->pcie_rst);
+ else
+ reset_control_assert(port->pcie_rst);
+}
+
+static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
+ struct device_node *node,
+ int slot)
+{
+ struct mt7621_pcie_port *port;
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ char name[10];
+ int err;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->base = devm_platform_ioremap_resource(pdev, slot + 1);
+ if (IS_ERR(port->base))
+ return PTR_ERR(port->base);
+
+ port->clk = devm_get_clk_from_child(dev, node, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(dev, "failed to get pcie%d clock\n", slot);
+ return PTR_ERR(port->clk);
+ }
+
+ port->pcie_rst = of_reset_control_get_exclusive(node, NULL);
+ if (PTR_ERR(port->pcie_rst) == -EPROBE_DEFER) {
+ dev_err(dev, "failed to get pcie%d reset control\n", slot);
+ return PTR_ERR(port->pcie_rst);
+ }
+
+ snprintf(name, sizeof(name), "pcie-phy%d", slot);
+ port->phy = devm_of_phy_get(dev, node, name);
+ if (IS_ERR(port->phy)) {
+ dev_err(dev, "failed to get pcie-phy%d\n", slot);
+ err = PTR_ERR(port->phy);
+ goto remove_reset;
+ }
+
+ port->gpio_rst = devm_gpiod_get_index_optional(dev, "reset", slot,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(port->gpio_rst)) {
+ dev_err(dev, "failed to get GPIO for PCIe%d\n", slot);
+ err = PTR_ERR(port->gpio_rst);
+ goto remove_reset;
+ }
+
+ port->slot = slot;
+ port->pcie = pcie;
+
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+
+remove_reset:
+ reset_control_put(port->pcie_rst);
+ return err;
+}
+
+static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *node = dev->of_node, *child;
+ int err;
+
+ pcie->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ for_each_available_child_of_node(node, child) {
+ int slot;
+
+ err = of_pci_get_devfn(child);
+ if (err < 0) {
+ of_node_put(child);
+ dev_err(dev, "failed to parse devfn: %d\n", err);
+ return err;
+ }
+
+ slot = PCI_SLOT(err);
+
+ err = mt7621_pcie_parse_port(pcie, child, slot);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mt7621_pcie_init_port(struct mt7621_pcie_port *port)
+{
+ struct mt7621_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
+ u32 slot = port->slot;
+ int err;
+
+ err = phy_init(port->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize port%d phy\n", slot);
+ return err;
+ }
+
+ err = phy_power_on(port->phy);
+ if (err) {
+ dev_err(dev, "failed to power on port%d phy\n", slot);
+ phy_exit(port->phy);
+ return err;
+ }
+
+ port->enabled = true;
+
+ return 0;
+}
+
+static void mt7621_pcie_reset_assert(struct mt7621_pcie *pcie)
+{
+ struct mt7621_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ /* PCIe RC reset assert */
+ mt7621_control_assert(port);
+
+ /* PCIe EP reset assert */
+ mt7621_rst_gpio_pcie_assert(port);
+ }
+
+ msleep(PERST_DELAY_MS);
+}
+
+static void mt7621_pcie_reset_rc_deassert(struct mt7621_pcie *pcie)
+{
+ struct mt7621_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ mt7621_control_deassert(port);
+}
+
+static void mt7621_pcie_reset_ep_deassert(struct mt7621_pcie *pcie)
+{
+ struct mt7621_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ mt7621_rst_gpio_pcie_deassert(port);
+
+ msleep(PERST_DELAY_MS);
+}
+
+static int mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct mt7621_pcie_port *port, *tmp;
+ u8 num_disabled = 0;
+ int err;
+
+ mt7621_pcie_reset_assert(pcie);
+ mt7621_pcie_reset_rc_deassert(pcie);
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ u32 slot = port->slot;
+
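+		/* Slot 1 has no PHY of its own: it is served by slot 0's dual-ported PHY */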
+ if (slot == 1) {
+ port->enabled = true;
+ continue;
+ }
+
+ err = mt7621_pcie_init_port(port);
+ if (err) {
+ dev_err(dev, "initializing port %d failed\n", slot);
+ list_del(&port->list);
+ }
+ }
+
+ msleep(INIT_PORTS_DELAY_MS);
+ mt7621_pcie_reset_ep_deassert(pcie);
+
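+	/* The shared slot 0/1 PHY is powered off below only if both slots failed to link up */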
+ tmp = NULL;
+ list_for_each_entry(port, &pcie->ports, list) {
+ u32 slot = port->slot;
+
+ if (!mt7621_pcie_port_is_linkup(port)) {
+ dev_info(dev, "pcie%d no card, disable it (RST & CLK)\n",
+ slot);
+ mt7621_control_assert(port);
+ port->enabled = false;
+ num_disabled++;
+
+ if (slot == 0) {
+ tmp = port;
+ continue;
+ }
+
+ if (slot == 1 && tmp && !tmp->enabled)
+ phy_power_off(tmp->phy);
+ }
+ }
+
+ return (num_disabled != PCIE_PORT_CNT) ? 0 : -ENODEV;
+}
+
+static void mt7621_pcie_enable_port(struct mt7621_pcie_port *port)
+{
+ struct mt7621_pcie *pcie = port->pcie;
+ u32 slot = port->slot;
+ u32 val;
+
+ /* enable pcie interrupt */
+ val = pcie_read(pcie, RALINK_PCI_PCIMSK_ADDR);
+ val |= PCIE_PORT_INT_EN(slot);
+ pcie_write(pcie, val, RALINK_PCI_PCIMSK_ADDR);
+
+ /* map 2G DDR region */
+ pcie_port_write(port, PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
+ PCI_BASE_ADDRESS_0);
+
+ /* configure class code and revision ID */
+ pcie_port_write(port, PCIE_CLASS_CODE | PCIE_REVISION_ID,
+ RALINK_PCI_CLASS);
+
+	/* configure the RC FTS number (fast training sequences sent when leaving L0s) */
+ val = read_config(pcie, slot, PCIE_FTS_NUM);
+ val &= ~PCIE_FTS_NUM_MASK;
+ val |= PCIE_FTS_NUM_L0(0x50);
+ write_config(pcie, slot, PCIE_FTS_NUM, val);
+}
+
+static int mt7621_pcie_enable_ports(struct pci_host_bridge *host)
+{
+ struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
+ struct device *dev = pcie->dev;
+ struct mt7621_pcie_port *port;
+ struct resource_entry *entry;
+ int err;
+
+ entry = resource_list_first_type(&host->windows, IORESOURCE_IO);
+ if (!entry) {
+ dev_err(dev, "cannot get io resource\n");
+ return -EINVAL;
+ }
+
+ /* Setup MEMWIN and IOWIN */
+ pcie_write(pcie, 0xffffffff, RALINK_PCI_MEMBASE);
+ pcie_write(pcie, entry->res->start - entry->offset, RALINK_PCI_IOBASE);
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ if (port->enabled) {
+ err = clk_prepare_enable(port->clk);
+ if (err) {
+				dev_err(dev, "failed to enable pcie%d clock\n",
+					port->slot);
+ return err;
+ }
+
+ mt7621_pcie_enable_port(port);
+ dev_info(dev, "PCIE%d enabled\n", port->slot);
+ }
+ }
+
+ return 0;
+}
+
+static int mt7621_pcie_register_host(struct pci_host_bridge *host)
+{
+ struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
+
+ host->ops = &mt7621_pcie_ops;
+ host->sysdata = pcie;
+ return pci_host_probe(host);
+}
+
+static const struct soc_device_attribute mt7621_pcie_quirks_match[] = {
+ { .soc_id = "mt7621", .revision = "E2" },
+ { /* sentinel */ }
+};
+
+static int mt7621_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct soc_device_attribute *attr;
+ struct mt7621_pcie_port *port;
+ struct mt7621_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ int err;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->dev = dev;
+ platform_set_drvdata(pdev, pcie);
+ INIT_LIST_HEAD(&pcie->ports);
+
+ attr = soc_device_match(mt7621_pcie_quirks_match);
+ if (attr)
+ pcie->resets_inverted = true;
+
+ err = mt7621_pcie_parse_dt(pcie);
+ if (err) {
+ dev_err(dev, "parsing DT failed\n");
+ return err;
+ }
+
+ err = mt7621_pcie_init_ports(pcie);
+ if (err) {
+ dev_err(dev, "nothing connected in virtual bridges\n");
+ return 0;
+ }
+
+ err = mt7621_pcie_enable_ports(bridge);
+ if (err) {
+ dev_err(dev, "error enabling pcie ports\n");
+ goto remove_resets;
+ }
+
+ return mt7621_pcie_register_host(bridge);
+
+remove_resets:
+ list_for_each_entry(port, &pcie->ports, list)
+ reset_control_put(port->pcie_rst);
+
+ return err;
+}
+
+static void mt7621_pcie_remove(struct platform_device *pdev)
+{
+ struct mt7621_pcie *pcie = platform_get_drvdata(pdev);
+ struct mt7621_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ reset_control_put(port->pcie_rst);
+}
+
+static const struct of_device_id mt7621_pcie_ids[] = {
+ { .compatible = "mediatek,mt7621-pci" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt7621_pcie_ids);
+
+static struct platform_driver mt7621_pcie_driver = {
+ .probe = mt7621_pcie_probe,
+ .remove_new = mt7621_pcie_remove,
+ .driver = {
+ .name = "mt7621-pci",
+ .of_match_table = mt7621_pcie_ids,
+ },
+};
+builtin_platform_driver(mt7621_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
new file mode 100644
index 000000000..f9682df1d
--- /dev/null
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe endpoint driver for Renesas R-Car SoCs
+ * Copyright (c) 2020 Renesas Electronics Europe GmbH
+ *
+ * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-epc.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-rcar.h"
+
+#define RCAR_EPC_MAX_FUNCTIONS 1
+
+/* Structure representing the PCIe interface */
+struct rcar_pcie_endpoint {
+ struct rcar_pcie pcie;
+ phys_addr_t *ob_mapped_addr;
+ struct pci_epc_mem_window *ob_window;
+ u8 max_functions;
+ unsigned int bar_to_atu[MAX_NR_INBOUND_MAPS];
+ unsigned long *ib_window_map;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
+};
+
+static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie)
+{
+ u32 val;
+
+ rcar_pci_write_reg(pcie, 0, PCIETCTLR);
+
+ /* Set endpoint mode */
+ rcar_pci_write_reg(pcie, 0, PCIEMSR);
+
+ /* Initialize default capabilities. */
+ rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
+ PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4);
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ PCI_HEADER_TYPE_NORMAL);
+
+ /* Write out the physical slot number = 0 */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
+
+ val = rcar_pci_read_reg(pcie, EXPCAP(1));
+	/* device supports a fixed 128-byte max payload size (MPSS) */
+ val &= ~GENMASK(2, 0);
+ rcar_pci_write_reg(pcie, val, EXPCAP(1));
+
+ val = rcar_pci_read_reg(pcie, EXPCAP(2));
+	/* limit max read request size to 128 bytes */
+ val &= ~GENMASK(14, 12);
+	/* limit max payload size to 128 bytes */
+ val &= ~GENMASK(7, 5);
+ rcar_pci_write_reg(pcie, val, EXPCAP(2));
+
+ /* Set target link speed to 5.0 GT/s */
+ rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
+ PCI_EXP_LNKSTA_CLS_5_0GB);
+
+ /* Set the completion timer timeout to the maximum 50ms. */
+ rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
+
+ /* Terminate list of capabilities (Next Capability Offset=0) */
+ rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
+
+ /* flush modifications */
+ wmb();
+}
+
+static int rcar_pcie_ep_get_window(struct rcar_pcie_endpoint *ep,
+ phys_addr_t addr)
+{
+ int i;
+
+ for (i = 0; i < ep->num_ob_windows; i++)
+ if (ep->ob_window[i].phys_base == addr)
+ return i;
+
+ return -EINVAL;
+}
+
+static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
+ struct platform_device *pdev)
+{
+ struct rcar_pcie *pcie = &ep->pcie;
+ char outbound_name[10];
+ struct resource *res;
+ unsigned int i = 0;
+
+ ep->num_ob_windows = 0;
+ for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
+ sprintf(outbound_name, "memory%u", i);
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ outbound_name);
+ if (!res) {
+ dev_err(pcie->dev, "missing outbound window %u\n", i);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res),
+ outbound_name)) {
+ dev_err(pcie->dev, "Cannot request memory region %s.\n",
+ outbound_name);
+ return -EIO;
+ }
+
+ ep->ob_window[i].phys_base = res->start;
+ ep->ob_window[i].size = resource_size(res);
+		/*
+		 * The controller doesn't support multiple allocations from
+		 * the same window, so set page_size to the window size.
+		 */
+ ep->ob_window[i].page_size = resource_size(res);
+ }
+ ep->num_ob_windows = i;
+
+ return 0;
+}
+
+static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
+ struct platform_device *pdev)
+{
+ struct rcar_pcie *pcie = &ep->pcie;
+ struct pci_epc_mem_window *window;
+ struct device *dev = pcie->dev;
+ struct resource res;
+ int err;
+
+ err = of_address_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
+ pcie->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ ep->ob_window = devm_kcalloc(dev, RCAR_PCI_MAX_RESOURCES,
+ sizeof(*window), GFP_KERNEL);
+ if (!ep->ob_window)
+ return -ENOMEM;
+
+ rcar_pcie_parse_outbound_ranges(ep, pdev);
+
+ err = of_property_read_u8(dev->of_node, "max-functions",
+ &ep->max_functions);
+ if (err < 0 || ep->max_functions > RCAR_EPC_MAX_FUNCTIONS)
+ ep->max_functions = RCAR_EPC_MAX_FUNCTIONS;
+
+ return 0;
+}
+
+static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
+ struct pci_epf_header *hdr)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 val;
+
+ if (!fn)
+ val = hdr->vendorid;
+ else
+ val = rcar_pci_read_reg(pcie, IDSETR0);
+ val |= hdr->deviceid << 16;
+ rcar_pci_write_reg(pcie, val, IDSETR0);
+
+ val = hdr->revid;
+ val |= hdr->progif_code << 8;
+ val |= hdr->subclass_code << 16;
+ val |= hdr->baseclass_code << 24;
+ rcar_pci_write_reg(pcie, val, IDSETR1);
+
+ if (!fn)
+ val = hdr->subsys_vendor_id;
+ else
+ val = rcar_pci_read_reg(pcie, SUBIDSETR);
+ val |= hdr->subsys_id << 16;
+ rcar_pci_write_reg(pcie, val, SUBIDSETR);
+
+ if (hdr->interrupt_pin > PCI_INTERRUPT_INTA)
+ return -EINVAL;
+ val = rcar_pci_read_reg(pcie, PCICONF(15));
+ val |= (hdr->interrupt_pin << 8);
+ rcar_pci_write_reg(pcie, val, PCICONF(15));
+
+ return 0;
+}
+
+static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+{
+ int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ u64 size = 1ULL << fls64(epf_bar->size - 1);
+ dma_addr_t cpu_addr = epf_bar->phys_addr;
+ enum pci_barno bar = epf_bar->barno;
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 mask;
+ int idx;
+ int err;
+
+ idx = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
+ if (idx >= ep->num_ib_windows) {
+ dev_err(pcie->dev, "no free inbound window\n");
+ return -EINVAL;
+ }
+
+ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
+ flags |= IO_SPACE;
+
+ ep->bar_to_atu[bar] = idx;
+	/* use 64-bit BARs: each BAR claims a pair of inbound windows */
+ set_bit(idx, ep->ib_window_map);
+ set_bit(idx + 1, ep->ib_window_map);
+
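+	/* Inbound windows must be naturally aligned; shrink the window to the alignment of cpu_addr */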
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
+
+ size = min(size, alignment);
+ }
+
+ size = min(size, 1ULL << 32);
+
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
+
+ rcar_pcie_set_inbound(pcie, cpu_addr,
+ 0x0, mask | flags, idx, false);
+
+ err = rcar_pcie_wait_for_phyrdy(pcie);
+ if (err) {
+ dev_err(pcie->dev, "phy not ready\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ enum pci_barno bar = epf_bar->barno;
+ u32 atu_index = ep->bar_to_atu[bar];
+
+ rcar_pcie_set_inbound(&ep->pcie, 0x0, 0x0, 0x0, bar, false);
+
+ clear_bit(atu_index, ep->ib_window_map);
+ clear_bit(atu_index + 1, ep->ib_window_map);
+}
+
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
+ u8 interrupts)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 flags;
+
+ flags = rcar_pci_read_reg(pcie, MSICAP(fn));
+ flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
+ rcar_pci_write_reg(pcie, flags, MSICAP(fn));
+
+ return 0;
+}
+
+static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 flags;
+
+ flags = rcar_pci_read_reg(pcie, MSICAP(fn));
+ if (!(flags & MSICAP0_MSIE))
+ return -EINVAL;
+
+ return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+}
+
+static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u64 pci_addr, size_t size)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ struct resource_entry win;
+ struct resource res;
+ int window;
+ int err;
+
+ /* check if we have a link. */
+ err = rcar_pcie_wait_for_dl(pcie);
+ if (err) {
+ dev_err(pcie->dev, "link not up\n");
+ return err;
+ }
+
+ window = rcar_pcie_ep_get_window(ep, addr);
+ if (window < 0) {
+ dev_err(pcie->dev, "failed to get corresponding window\n");
+ return -EINVAL;
+ }
+
+ memset(&win, 0x0, sizeof(win));
+ memset(&res, 0x0, sizeof(res));
+ res.start = pci_addr;
+ res.end = pci_addr + size - 1;
+ res.flags = IORESOURCE_MEM;
+ win.res = &res;
+
+ rcar_pcie_set_outbound(pcie, window, &win);
+
+ ep->ob_mapped_addr[window] = addr;
+
+ return 0;
+}
+
+static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct resource_entry win;
+ struct resource res;
+ int idx;
+
+ for (idx = 0; idx < ep->num_ob_windows; idx++)
+ if (ep->ob_mapped_addr[idx] == addr)
+ break;
+
+ if (idx >= ep->num_ob_windows)
+ return;
+
+ memset(&win, 0x0, sizeof(win));
+ memset(&res, 0x0, sizeof(res));
+ win.res = &res;
+ rcar_pcie_set_outbound(&ep->pcie, idx, &win);
+
+ ep->ob_mapped_addr[idx] = 0;
+}
+
+static int rcar_pcie_ep_assert_intx(struct rcar_pcie_endpoint *ep,
+ u8 fn, u8 intx)
+{
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 val;
+
+ val = rcar_pci_read_reg(pcie, PCIEMSITXR);
+ if ((val & PCI_MSI_FLAGS_ENABLE)) {
+ dev_err(pcie->dev, "MSI is enabled, cannot assert INTx\n");
+ return -EINVAL;
+ }
+
+ val = rcar_pci_read_reg(pcie, PCICONF(1));
+ if ((val & INTDIS)) {
+ dev_err(pcie->dev, "INTx message transmission is disabled\n");
+ return -EINVAL;
+ }
+
+ val = rcar_pci_read_reg(pcie, PCIEINTXR);
+ if ((val & ASTINTX)) {
+ dev_err(pcie->dev, "INTx is already asserted\n");
+ return -EINVAL;
+ }
+
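+	/* Pulse INTx: assert, hold for about 1 ms, then deassert */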
+ val |= ASTINTX;
+ rcar_pci_write_reg(pcie, val, PCIEINTXR);
+ usleep_range(1000, 1001);
+ val = rcar_pci_read_reg(pcie, PCIEINTXR);
+ val &= ~ASTINTX;
+ rcar_pci_write_reg(pcie, val, PCIEINTXR);
+
+ return 0;
+}
+
+static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
+ u8 fn, u8 interrupt_num)
+{
+ u16 msi_count;
+ u32 val;
+
+ /* Check MSI enable bit */
+ val = rcar_pci_read_reg(pcie, MSICAP(fn));
+ if (!(val & MSICAP0_MSIE))
+ return -EINVAL;
+
+ /* Get MSI numbers from MME */
+ msi_count = ((val & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+ msi_count = 1 << msi_count;
+
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ val = rcar_pci_read_reg(pcie, PCIEMSITXR);
+ rcar_pci_write_reg(pcie, val | (interrupt_num - 1), PCIEMSITXR);
+
+ return 0;
+}
+
+static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return rcar_pcie_ep_assert_intx(ep, fn, 0);
+
+ case PCI_EPC_IRQ_MSI:
+ return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rcar_pcie_ep_start(struct pci_epc *epc)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+
+ rcar_pci_write_reg(&ep->pcie, MACCTLR_INIT_VAL, MACCTLR);
+ rcar_pci_write_reg(&ep->pcie, CFINIT, PCIETCTLR);
+
+ return 0;
+}
+
+static void rcar_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+
+ rcar_pci_write_reg(&ep->pcie, 0, PCIETCTLR);
+}
+
+static const struct pci_epc_features rcar_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ /* use 64-bit BARs so mark BAR[1,3,5] as reserved */
+ .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
+ .bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
+ .bar_fixed_size[0] = 128,
+ .bar_fixed_size[2] = 256,
+ .bar_fixed_size[4] = 256,
+};
+
+static const struct pci_epc_features*
+rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ return &rcar_pcie_epc_features;
+}
+
+static const struct pci_epc_ops rcar_pcie_epc_ops = {
+ .write_header = rcar_pcie_ep_write_header,
+ .set_bar = rcar_pcie_ep_set_bar,
+ .clear_bar = rcar_pcie_ep_clear_bar,
+ .set_msi = rcar_pcie_ep_set_msi,
+ .get_msi = rcar_pcie_ep_get_msi,
+ .map_addr = rcar_pcie_ep_map_addr,
+ .unmap_addr = rcar_pcie_ep_unmap_addr,
+ .raise_irq = rcar_pcie_ep_raise_irq,
+ .start = rcar_pcie_ep_start,
+ .stop = rcar_pcie_ep_stop,
+ .get_features = rcar_pcie_ep_get_features,
+};
+
+static const struct of_device_id rcar_pcie_ep_of_match[] = {
+ { .compatible = "renesas,r8a774c0-pcie-ep", },
+ { .compatible = "renesas,rcar-gen3-pcie-ep" },
+ { },
+};
+
+static int rcar_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcar_pcie_endpoint *ep;
+ struct rcar_pcie *pcie;
+ struct pci_epc *epc;
+ int err;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ pcie = &ep->pcie;
+ pcie->dev = dev;
+
+ pm_runtime_enable(dev);
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ goto err_pm_disable;
+ }
+
+ err = rcar_pcie_ep_get_pdata(ep, pdev);
+ if (err < 0) {
+ dev_err(dev, "failed to request resources: %d\n", err);
+ goto err_pm_put;
+ }
+
+ ep->num_ib_windows = MAX_NR_INBOUND_MAPS;
+ ep->ib_window_map =
+ devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ib_windows),
+ sizeof(long), GFP_KERNEL);
+ if (!ep->ib_window_map) {
+ err = -ENOMEM;
+ dev_err(dev, "failed to allocate memory for inbound map\n");
+ goto err_pm_put;
+ }
+
+ ep->ob_mapped_addr = devm_kcalloc(dev, ep->num_ob_windows,
+ sizeof(*ep->ob_mapped_addr),
+ GFP_KERNEL);
+ if (!ep->ob_mapped_addr) {
+ err = -ENOMEM;
+ dev_err(dev, "failed to allocate memory for outbound memory pointers\n");
+ goto err_pm_put;
+ }
+
+ epc = devm_pci_epc_create(dev, &rcar_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ err = PTR_ERR(epc);
+ goto err_pm_put;
+ }
+
+ epc->max_functions = ep->max_functions;
+ epc_set_drvdata(epc, ep);
+
+ rcar_pcie_ep_hw_init(pcie);
+
+ err = pci_epc_multi_mem_init(epc, ep->ob_window, ep->num_ob_windows);
+ if (err < 0) {
+ dev_err(dev, "failed to initialize the epc memory space\n");
+ goto err_pm_put;
+ }
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_put(dev);
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+
+ return err;
+}
+
+static struct platform_driver rcar_pcie_ep_driver = {
+ .driver = {
+ .name = "rcar-pcie-ep",
+ .of_match_table = rcar_pcie_ep_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rcar_pcie_ep_probe,
+};
+builtin_platform_driver(rcar_pcie_ep_driver);
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
new file mode 100644
index 000000000..88975e40e
--- /dev/null
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -0,0 +1,1155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
+ *
+ * Based on:
+ * arch/sh/drivers/pci/pcie-sh7786.c
+ * arch/sh/drivers/pci/ops-sh7786.c
+ * Copyright (C) 2009 - 2011 Paul Mundt
+ *
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-rcar.h"
+
+struct rcar_msi {
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ struct mutex map_lock;
+ spinlock_t mask_lock;
+ int irq1;
+ int irq2;
+};
+
+/* Structure representing the PCIe interface */
+struct rcar_pcie_host {
+ struct rcar_pcie pcie;
+ struct phy *phy;
+ struct clk *bus_clk;
+ struct rcar_msi msi;
+ int (*phy_init_fn)(struct rcar_pcie_host *host);
+};
+
+static DEFINE_SPINLOCK(pmsr_lock);
+
+static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
+{
+ unsigned long flags;
+ u32 pmsr, val;
+ int ret = 0;
+
+ spin_lock_irqsave(&pmsr_lock, flags);
+
+ if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ pmsr = readl(pcie_base + PMSR);
+
+ /*
+ * Test if the PCIe controller received PM_ENTER_L1 DLLP and
+ * the PCIe controller is not in L1 link state. If true, apply the
+ * fix, which will put the controller into L1 link state, from
+ * which it can return to L0s/L0 on its own.
+ */
+ if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
+ writel(L1IATN, pcie_base + PMCTLR);
+ ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
+ val & L1FAEG, 10, 1000);
+ WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+ }
+
+unlock_exit:
+ spin_unlock_irqrestore(&pmsr_lock, flags);
+ return ret;
+}
+
+static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
+{
+ return container_of(msi, struct rcar_pcie_host, msi);
+}
+
+static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
+{
+ unsigned int shift = BITS_PER_BYTE * (where & 3);
+ u32 val = rcar_pci_read_reg(pcie, where & ~3);
+
+ return val >> shift;
+}
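+
+/*
+ * For example, rcar_read_conf(pcie, RCONF(PCI_STATUS)) targets offset
+ * 0x06, so it fetches the aligned 32-bit word at offset 0x04 and shifts
+ * it right by 16 bits, leaving PCI_STATUS in the low 16 bits of the
+ * return value.
+ */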
+
+#ifdef CONFIG_ARM
+#define __rcar_pci_rw_reg_workaround(instr) \
+ " .arch armv7-a\n" \
+ "1: " instr " %1, [%2]\n" \
+ "2: isb\n" \
+ "3: .pushsection .text.fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: mov %0, #" __stringify(PCIBIOS_SET_FAILED) "\n" \
+ " b 3b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .popsection\n"
+#endif
+
+static int rcar_pci_write_reg_workaround(struct rcar_pcie *pcie, u32 val,
+ unsigned int reg)
+{
+ int error = PCIBIOS_SUCCESSFUL;
+#ifdef CONFIG_ARM
+ asm volatile(
+ __rcar_pci_rw_reg_workaround("str")
+ : "+r"(error):"r"(val), "r"(pcie->base + reg) : "memory");
+#else
+ rcar_pci_write_reg(pcie, val, reg);
+#endif
+ return error;
+}
+
+static int rcar_pci_read_reg_workaround(struct rcar_pcie *pcie, u32 *val,
+ unsigned int reg)
+{
+ int error = PCIBIOS_SUCCESSFUL;
+#ifdef CONFIG_ARM
+ asm volatile(
+ __rcar_pci_rw_reg_workaround("ldr")
+ : "+r"(error), "=r"(*val) : "r"(pcie->base + reg) : "memory");
+
+ if (error != PCIBIOS_SUCCESSFUL)
+ PCI_SET_ERROR_RESPONSE(val);
+#else
+ *val = rcar_pci_read_reg(pcie, reg);
+#endif
+ return error;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_config_access(struct rcar_pcie_host *host,
+ unsigned char access_type, struct pci_bus *bus,
+ unsigned int devfn, int where, u32 *data)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ unsigned int dev, func, reg, index;
+ int ret;
+
+ /* Wake the bus up in case it is in L1 state. */
+ ret = rcar_pcie_wakeup(pcie->dev, pcie->base);
+ if (ret) {
+ PCI_SET_ERROR_RESPONSE(data);
+ return PCIBIOS_SET_FAILED;
+ }
+
+ dev = PCI_SLOT(devfn);
+ func = PCI_FUNC(devfn);
+ reg = where & ~3;
+ index = reg / 4;
+
+ /*
+ * While each channel has its own memory-mapped extended config
+ * space, it's generally only accessible when in endpoint mode.
+ * When in root complex mode, the controller is unable to target
+ * itself with either type 0 or type 1 accesses, and indeed, any
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
+ *
+ * Each channel effectively only supports a single device, but as
+ * the same channel <-> device access works for any PCI_SLOT()
+ * value, we cheat a bit here and bind the controller's config
+ * space to devfn 0 in order to enable self-enumeration. In this
+ * case the regular ECAR/ECDR path is sidelined and the mangled
+ * config access itself is initiated as an internal bus transaction.
+ */
+ if (pci_is_root_bus(bus)) {
+ if (dev != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == RCAR_PCI_ACCESS_READ)
+ *data = rcar_pci_read_reg(pcie, PCICONF(index));
+ else
+ rcar_pci_write_reg(pcie, *data, PCICONF(index));
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ /* Clear errors */
+ rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
+
+ /* Set the PIO address */
+ rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
+ PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
+
+ /* Enable the configuration access */
+ if (pci_is_root_bus(bus->parent))
+ rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE0, PCIECCTLR);
+ else
+ rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE1, PCIECCTLR);
+
+ /* Check for errors */
+ if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Check for master and target aborts */
+ if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
+ (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == RCAR_PCI_ACCESS_READ)
+ ret = rcar_pci_read_reg_workaround(pcie, data, PCIECDR);
+ else
+ ret = rcar_pci_write_reg_workaround(pcie, *data, PCIECDR);
+
+ /* Disable the configuration access */
+ rcar_pci_write_reg(pcie, 0, PCIECCTLR);
+
+ return ret;
+}
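+
+/*
+ * As a worked example of the PIO address encoding above: a type 1 access
+ * to bus 2, device 0, function 0, register 0x10 programs PCIECAR with
+ * PCIE_CONF_BUS(2) | PCIE_CONF_DEV(0) | PCIE_CONF_FUNC(0) | 0x10,
+ * i.e. 0x02000010, before the data is transferred through PCIECDR.
+ */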
+
+static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct rcar_pcie_host *host = bus->sysdata;
+ int ret;
+
+ ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
+ bus, devfn, where, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (size == 1)
+ *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
+
+ dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+ bus->number, devfn, where, size, *val);
+
+ return ret;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rcar_pcie_host *host = bus->sysdata;
+ unsigned int shift;
+ u32 data;
+ int ret;
+
+ ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
+ bus, devfn, where, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+ bus->number, devfn, where, size, val);
+
+ if (size == 1) {
+ shift = BITS_PER_BYTE * (where & 3);
+ data &= ~(0xff << shift);
+ data |= ((val & 0xff) << shift);
+ } else if (size == 2) {
+ shift = BITS_PER_BYTE * (where & 2);
+ data &= ~(0xffff << shift);
+ data |= ((val & 0xffff) << shift);
+ } else {
+ data = val;
+ }
+
+ ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
+ bus, devfn, where, &data);
+
+ return ret;
+}
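+
+/*
+ * For example, a one-byte write of 0xab to offset 0x05 reads back the
+ * aligned word at 0x04, clears bits 15:8 and merges in 0xab << 8 before
+ * writing the full word back through the same config access path.
+ */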
+
+static struct pci_ops rcar_pcie_ops = {
+ .read = rcar_pcie_read_conf,
+ .write = rcar_pcie_write_conf,
+};
+
+static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int timeout = 1000;
+ u32 macsr;
+
+ if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
+ return;
+
+ if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
+ dev_err(dev, "Speed change already in progress\n");
+ return;
+ }
+
+ macsr = rcar_pci_read_reg(pcie, MACSR);
+ if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
+ goto done;
+
+ /* Set target link speed to 5.0 GT/s */
+ rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
+ PCI_EXP_LNKSTA_CLS_5_0GB);
+
+ /* Set speed change reason as intentional factor */
+ rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
+
+ /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
+ if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
+ rcar_pci_write_reg(pcie, macsr, MACSR);
+
+ /* Start link speed change */
+ rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
+
+ while (timeout--) {
+ macsr = rcar_pci_read_reg(pcie, MACSR);
+ if (macsr & SPCHGFIN) {
+ /* Clear the interrupt bits */
+ rcar_pci_write_reg(pcie, macsr, MACSR);
+
+ if (macsr & SPCHGFAIL)
+ dev_err(dev, "Speed change failed\n");
+
+ goto done;
+ }
+
+ msleep(1);
+ }
+
+ dev_err(dev, "Speed change timed out\n");
+
+done:
+ dev_info(dev, "Current link speed is %s GT/s\n",
+ (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
+}
+
+static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct resource_entry *win;
+ LIST_HEAD(res);
+ int i = 0;
+
+ /* Try setting 5 GT/s link speed */
+ rcar_pcie_force_speedup(pcie);
+
+ /* Setup PCI resources */
+ resource_list_for_each_entry(win, &bridge->windows) {
+ struct resource *res = win->res;
+
+ if (!res->flags)
+ continue;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ case IORESOURCE_MEM:
+ rcar_pcie_set_outbound(pcie, i, win);
+ i++;
+ break;
+ }
+ }
+}
+
+static int rcar_pcie_enable(struct rcar_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+
+ rcar_pcie_hw_enable(host);
+
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
+
+ bridge->sysdata = host;
+ bridge->ops = &rcar_pcie_ops;
+
+ return pci_host_probe(bridge);
+}
+
+static int phy_wait_for_ack(struct rcar_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int timeout = 100;
+
+ while (timeout--) {
+ if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
+ return 0;
+
+ udelay(100);
+ }
+
+ dev_err(dev, "Access to PCIe phy timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static void phy_write_reg(struct rcar_pcie *pcie,
+ unsigned int rate, u32 addr,
+ unsigned int lane, u32 data)
+{
+ u32 phyaddr;
+
+ phyaddr = WRITE_CMD |
+ ((rate & 1) << RATE_POS) |
+ ((lane & 0xf) << LANE_POS) |
+ ((addr & 0xff) << ADR_POS);
+
+ /* Set write data */
+ rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
+ rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+
+ /* Clear command */
+ rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
+ rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+}
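+
+/*
+ * As an example of the command encoding above, phy_write_reg(pcie, 1,
+ * 0x43, 0x1, ...) composes WRITE_CMD | (1 << RATE_POS) |
+ * (1 << LANE_POS) | (0x43 << ADR_POS), i.e. a command word of 0x11143.
+ */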
+
+static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
+{
+ int err;
+
+ /* Begin initialization */
+ rcar_pci_write_reg(pcie, 0, PCIETCTLR);
+
+ /* Set mode */
+ rcar_pci_write_reg(pcie, 1, PCIEMSR);
+
+ err = rcar_pcie_wait_for_phyrdy(pcie);
+ if (err)
+ return err;
+
+ /*
+ * The initial header for the port config space is type 1; set the
+ * device class to match. Hardware takes care of propagating the IDSETR
+ * settings, so there is no need to bother with a quirk.
+ */
+ rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1);
+
+ /*
+ * Set up the Secondary Bus Number & Subordinate Bus Number, even
+ * though they aren't used, to avoid the bridge being detected as
+ * broken.
+ */
+ rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
+ rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
+
+ /* Initialize default capabilities. */
+ rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
+ PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ PCI_HEADER_TYPE_BRIDGE);
+
+ /* Enable data link layer active state reporting */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
+ PCI_EXP_LNKCAP_DLLLARC);
+
+ /* Write out the physical slot number = 0 */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
+
+ /* Set the completion timer timeout to the maximum 50ms. */
+ rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
+
+ /* Terminate list of capabilities (Next Capability Offset=0) */
+ rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
+
+ /* Enable MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
+
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+
+ /* Finish initialization - establish a PCI Express link */
+ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+
+ /* This will timeout if we don't have a link. */
+ err = rcar_pcie_wait_for_dl(pcie);
+ if (err)
+ return err;
+
+ /* Enable INTx interrupts */
+ rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
+
+ wmb();
+
+ return 0;
+}
+
+static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+
+ /* Initialize the phy */
+ phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
+ phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
+ phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
+ phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
+ phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
+ phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
+
+ phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
+ phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
+ phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
+
+ return 0;
+}
+
+static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+
+ /*
+ * These settings come from the R-Car Series, 2nd Generation User's
+ * Manual, section 50.3.1 (2) Initialization of the physical layer.
+ */
+ rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
+ rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
+ rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
+ rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
+
+ rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
+ /* The following value is for DC connection, no termination resistor */
+ rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
+ rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
+ rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
+
+ return 0;
+}
+
+static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
+{
+ int err;
+
+ err = phy_init(host->phy);
+ if (err)
+ return err;
+
+ err = phy_power_on(host->phy);
+ if (err)
+ phy_exit(host->phy);
+
+ return err;
+}
+
+static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
+{
+ struct rcar_pcie_host *host = data;
+ struct rcar_pcie *pcie = &host->pcie;
+ struct rcar_msi *msi = &host->msi;
+ struct device *dev = pcie->dev;
+ unsigned long reg;
+
+ reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
+
+ /* MSI & INTx share an interrupt - we only handle MSI here */
+ if (!reg)
+ return IRQ_NONE;
+
+ while (reg) {
+ unsigned int index = find_first_bit(&reg, 32);
+ int ret;
+
+ ret = generic_handle_domain_irq(msi->domain->parent, index);
+ if (ret) {
+ /* Unknown MSI, just clear it */
+ dev_dbg(dev, "unexpected MSI\n");
+ rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
+ }
+
+ /* see if there's any more pending in this vector */
+ reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rcar_msi_top_irq_ack(struct irq_data *d)
+{
+ irq_chip_ack_parent(d);
+}
+
+static void rcar_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void rcar_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip rcar_msi_top_chip = {
+ .name = "PCIe MSI",
+ .irq_ack = rcar_msi_top_irq_ack,
+ .irq_mask = rcar_msi_top_irq_mask,
+ .irq_unmask = rcar_msi_top_irq_unmask,
+};
+
+static void rcar_msi_irq_ack(struct irq_data *d)
+{
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+
+ /* clear the interrupt */
+ rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
+}
+
+static void rcar_msi_irq_mask(struct irq_data *d)
+{
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value &= ~BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
+}
+
+static void rcar_msi_irq_unmask(struct irq_data *d)
+{
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value |= BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
+}
+
+static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+
+ msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip rcar_msi_bottom_chip = {
+ .name = "R-Car MSI",
+ .irq_ack = rcar_msi_irq_ack,
+ .irq_mask = rcar_msi_irq_mask,
+ .irq_unmask = rcar_msi_irq_unmask,
+ .irq_set_affinity = rcar_msi_set_affinity,
+ .irq_compose_msi_msg = rcar_compose_msi_msg,
+};
+
+static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct rcar_msi *msi = domain->host_data;
+ unsigned int i;
+ int hwirq;
+
+ mutex_lock(&msi->map_lock);
+
+ hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
+
+ mutex_unlock(&msi->map_lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &rcar_msi_bottom_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct rcar_msi *msi = domain->host_data;
+
+ mutex_lock(&msi->map_lock);
+
+ bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
+
+ mutex_unlock(&msi->map_lock);
+}
+
+static const struct irq_domain_ops rcar_msi_domain_ops = {
+ .alloc = rcar_msi_domain_alloc,
+ .free = rcar_msi_domain_free,
+};
+
+static struct msi_domain_info rcar_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &rcar_msi_top_chip,
+};
+
+static int rcar_allocate_domains(struct rcar_msi *msi)
+{
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain *parent;
+
+ parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
+ &rcar_msi_domain_ops, msi);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
+ if (!msi->domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void rcar_free_domains(struct rcar_msi *msi)
+{
+ struct irq_domain *parent = msi->domain->parent;
+
+ irq_domain_remove(msi->domain);
+ irq_domain_remove(parent);
+}
+
+static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct device *dev = pcie->dev;
+ struct rcar_msi *msi = &host->msi;
+ struct resource res;
+ int err;
+
+ mutex_init(&msi->map_lock);
+ spin_lock_init(&msi->mask_lock);
+
+ err = of_address_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
+
+ err = rcar_allocate_domains(msi);
+ if (err)
+ return err;
+
+ /* The two IRQs are used for MSI, but they are also shared with non-MSI interrupts */
+ err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_bottom_chip.name, host);
+ if (err < 0) {
+ dev_err(dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_bottom_chip.name, host);
+ if (err < 0) {
+ dev_err(dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ /* disable all MSIs */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+ /*
+ * Setup MSI data target using the RC base address, which is
+ * guaranteed to be in the low 32-bit range on any R-Car HW.
+ */
+ rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
+ rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
+
+ return 0;
+
+err:
+ rcar_free_domains(msi);
+ return err;
+}
+
+static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+
+ /* Disable all MSI interrupts */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+ /* Disable address decoding of the MSI interrupt, MSIFE */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
+
+ rcar_free_domains(&host->msi);
+}
+
+static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct device *dev = pcie->dev;
+ struct resource res;
+ int err, i;
+
+ host->phy = devm_phy_optional_get(dev, "pcie");
+ if (IS_ERR(host->phy))
+ return PTR_ERR(host->phy);
+
+ err = of_address_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
+
+ pcie->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ host->bus_clk = devm_clk_get(dev, "pcie_bus");
+ if (IS_ERR(host->bus_clk)) {
+ dev_err(dev, "cannot get pcie bus clock\n");
+ return PTR_ERR(host->bus_clk);
+ }
+
+ i = irq_of_parse_and_map(dev->of_node, 0);
+ if (!i) {
+ dev_err(dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_irq1;
+ }
+ host->msi.irq1 = i;
+
+ i = irq_of_parse_and_map(dev->of_node, 1);
+ if (!i) {
+ dev_err(dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_irq2;
+ }
+ host->msi.irq2 = i;
+
+ return 0;
+
+err_irq2:
+ irq_dispose_mapping(host->msi.irq1);
+err_irq1:
+ return err;
+}
+
+static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
+ struct resource_entry *entry,
+ int *index)
+{
+ u64 restype = entry->res->flags;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 pci_addr = entry->res->start - entry->offset;
+ u32 flags = LAM_64BIT | LAR_ENABLE;
+ u64 mask;
+ u64 size = resource_size(entry->res);
+ int idx = *index;
+
+ if (restype & IORESOURCE_PREFETCH)
+ flags |= LAM_PREFETCH;
+
+ while (cpu_addr < cpu_end) {
+ if (idx >= MAX_NR_INBOUND_MAPS - 1) {
+ dev_err(pcie->dev, "Failed to map inbound regions!\n");
+ return -EINVAL;
+ }
+ /*
+ * If the size of the range is larger than the alignment of
+ * the start address, we have to use multiple entries to
+ * perform the mapping.
+ */
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
+
+ size = min(size, alignment);
+ }
+ /* The hardware supports at most a 4 GiB inbound region */
+ size = min(size, 1ULL << 32);
+
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
+
+ rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
+ lower_32_bits(mask) | flags, idx, true);
+
+ pci_addr += size;
+ cpu_addr += size;
+ idx += 2;
+ }
+ *index = idx;
+
+ return 0;
+}
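+
+/*
+ * Rough worked example of the splitting above: a 3 GiB range starting at
+ * CPU address 0x40000000 is aligned to 1 GiB, so it is mapped as three
+ * 1 GiB windows at 0x40000000, 0x80000000 and 0xc0000000, consuming
+ * index pairs 0/1, 2/3 and 4/5. Note that once the window size has been
+ * reduced to match the start alignment it never grows back.
+ */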
+
+static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct resource_entry *entry;
+ int index = 0, err = 0;
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static const struct of_device_id rcar_pcie_of_match[] = {
+ { .compatible = "renesas,pcie-r8a7779",
+ .data = rcar_pcie_phy_init_h1 },
+ { .compatible = "renesas,pcie-r8a7790",
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7791",
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-rcar-gen2",
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7795",
+ .data = rcar_pcie_phy_init_gen3 },
+ { .compatible = "renesas,pcie-rcar-gen3",
+ .data = rcar_pcie_phy_init_gen3 },
+ {},
+};
+
+static int rcar_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcar_pcie_host *host;
+ struct rcar_pcie *pcie;
+ u32 data;
+ int err;
+ struct pci_host_bridge *bridge;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
+ if (!bridge)
+ return -ENOMEM;
+
+ host = pci_host_bridge_priv(bridge);
+ pcie = &host->pcie;
+ pcie->dev = dev;
+ platform_set_drvdata(pdev, host);
+
+ pm_runtime_enable(pcie->dev);
+ err = pm_runtime_get_sync(pcie->dev);
+ if (err < 0) {
+ dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+ goto err_pm_put;
+ }
+
+ err = rcar_pcie_get_resources(host);
+ if (err < 0) {
+ dev_err(dev, "failed to request resources: %d\n", err);
+ goto err_pm_put;
+ }
+
+ err = clk_prepare_enable(host->bus_clk);
+ if (err) {
+ dev_err(dev, "failed to enable bus clock: %d\n", err);
+ goto err_unmap_msi_irqs;
+ }
+
+ err = rcar_pcie_parse_map_dma_ranges(host);
+ if (err)
+ goto err_clk_disable;
+
+ host->phy_init_fn = of_device_get_match_data(dev);
+ err = host->phy_init_fn(host);
+ if (err) {
+ dev_err(dev, "failed to init PCIe PHY\n");
+ goto err_clk_disable;
+ }
+
+ /* Failure to get a link might just be that no cards are inserted */
+ if (rcar_pcie_hw_init(pcie)) {
+ dev_info(dev, "PCIe link down\n");
+ err = -ENODEV;
+ goto err_phy_shutdown;
+ }
+
+ data = rcar_pci_read_reg(pcie, MACSR);
+ dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = rcar_pcie_enable_msi(host);
+ if (err < 0) {
+ dev_err(dev,
+ "failed to enable MSI support: %d\n",
+ err);
+ goto err_phy_shutdown;
+ }
+ }
+
+ err = rcar_pcie_enable(host);
+ if (err)
+ goto err_msi_teardown;
+
+ return 0;
+
+err_msi_teardown:
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pcie_teardown_msi(host);
+
+err_phy_shutdown:
+ if (host->phy) {
+ phy_power_off(host->phy);
+ phy_exit(host->phy);
+ }
+
+err_clk_disable:
+ clk_disable_unprepare(host->bus_clk);
+
+err_unmap_msi_irqs:
+ irq_dispose_mapping(host->msi.irq2);
+ irq_dispose_mapping(host->msi.irq1);
+
+err_pm_put:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return err;
+}
+
+static int rcar_pcie_resume(struct device *dev)
+{
+ struct rcar_pcie_host *host = dev_get_drvdata(dev);
+ struct rcar_pcie *pcie = &host->pcie;
+ unsigned int data;
+ int err;
+
+ err = rcar_pcie_parse_map_dma_ranges(host);
+ if (err)
+ return 0;
+
+ /* Failure to get a link might just be that no cards are inserted */
+ err = host->phy_init_fn(host);
+ if (err) {
+ dev_info(dev, "PCIe link down\n");
+ return 0;
+ }
+
+ data = rcar_pci_read_reg(pcie, MACSR);
+ dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+
+ /* Enable MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct resource res;
+ u32 val;
+
+ of_address_to_resource(dev->of_node, 0, &res);
+ rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
+ rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
+
+ bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
+ rcar_pci_write_reg(pcie, val, PCIEMSIIER);
+ }
+
+ rcar_pcie_hw_enable(host);
+
+ return 0;
+}
+
+static int rcar_pcie_resume_noirq(struct device *dev)
+{
+ struct rcar_pcie_host *host = dev_get_drvdata(dev);
+ struct rcar_pcie *pcie = &host->pcie;
+
+ if (rcar_pci_read_reg(pcie, PMSR) &&
+ !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
+ return 0;
+
+ /* Re-establish the PCIe link */
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+ return rcar_pcie_wait_for_dl(pcie);
+}
+
+static const struct dev_pm_ops rcar_pcie_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
+ .resume_noirq = rcar_pcie_resume_noirq,
+};
+
+static struct platform_driver rcar_pcie_driver = {
+ .driver = {
+ .name = "rcar-pcie",
+ .of_match_table = rcar_pcie_of_match,
+ .pm = &rcar_pcie_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rcar_pcie_probe,
+};
+
+#ifdef CONFIG_ARM
+static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ return !fixup_exception(regs);
+}
+
+static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
+ { .compatible = "renesas,pcie-r8a7779" },
+ { .compatible = "renesas,pcie-r8a7790" },
+ { .compatible = "renesas,pcie-r8a7791" },
+ { .compatible = "renesas,pcie-rcar-gen2" },
+ {},
+};
+
+static int __init rcar_pcie_init(void)
+{
+ if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) {
+#ifdef CONFIG_ARM_LPAE
+ hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
+ "asynchronous external abort");
+#else
+ hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+#endif
+ }
+
+ return platform_driver_register(&rcar_pcie_driver);
+}
+device_initcall(rcar_pcie_init);
+#else
+builtin_platform_driver(rcar_pcie_driver);
+#endif
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
new file mode 100644
index 000000000..7583699ef
--- /dev/null
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
+ *
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include "pcie-rcar.h"
+
+void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val, unsigned int reg)
+{
+ writel(val, pcie->base + reg);
+}
+
+u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg)
+{
+ return readl(pcie->base + reg);
+}
+
+void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
+{
+ unsigned int shift = BITS_PER_BYTE * (where & 3);
+ u32 val = rcar_pci_read_reg(pcie, where & ~3);
+
+ val &= ~(mask << shift);
+ val |= data << shift;
+ rcar_pci_write_reg(pcie, val, where & ~3);
+}
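+
+/*
+ * For example, rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1)
+ * targets offset 0x19 within the config mirror, so the shift is 8:
+ * bits 15:8 of the aligned word are cleared and then set to 0x01.
+ */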
+
+int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10;
+
+ while (timeout--) {
+ if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
+ return 0;
+
+ msleep(5);
+ }
+
+ return -ETIMEDOUT;
+}
+
+int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10000;
+
+ while (timeout--) {
+ if (rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)
+ return 0;
+
+ udelay(5);
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+void rcar_pcie_set_outbound(struct rcar_pcie *pcie, int win,
+ struct resource_entry *window)
+{
+ /* Setup PCIe address space mappings for each resource */
+ struct resource *res = window->res;
+ resource_size_t res_start;
+ resource_size_t size;
+ u32 mask;
+
+ rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
+
+ /*
+ * The PAMR mask is calculated in units of 128 bytes, which
+ * keeps things pretty simple.
+ */
+ size = resource_size(res);
+ if (size > 128)
+ mask = (roundup_pow_of_two(size) / SZ_128) - 1;
+ else
+ mask = 0x0;
+ rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
+
+ if (res->flags & IORESOURCE_IO)
+ res_start = pci_pio_to_address(res->start) - window->offset;
+ else
+ res_start = res->start - window->offset;
+
+ rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
+ rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
+ PCIEPALR(win));
+
+ /* First resource is for IO */
+ mask = PAR_ENABLE;
+ if (res->flags & IORESOURCE_IO)
+ mask |= IO_SPACE;
+
+ rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
+}
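+
+/*
+ * PAMR example: a 1 MiB window rounds up to 0x100000 bytes, giving a
+ * mask of 0x100000 / 128 - 1 = 0x1fff, which lands in PCIEPAMR as
+ * 0x1fff << 7 = 0xfff80.
+ */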
+
+void rcar_pcie_set_inbound(struct rcar_pcie *pcie, u64 cpu_addr,
+ u64 pci_addr, u64 flags, int idx, bool host)
+{
+ /*
+ * Set up 64-bit inbound regions as the range parser doesn't
+ * distinguish between 32 and 64-bit types.
+ */
+ if (host)
+ rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
+ PCIEPRAR(idx));
+ rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
+ rcar_pci_write_reg(pcie, flags, PCIELAMR(idx));
+
+ if (host)
+ rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
+ PCIEPRAR(idx + 1));
+ rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx + 1));
+ rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
+}
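+
+/*
+ * Each inbound mapping consumes two consecutive PCIEPRAR/PCIELAR/PCIELAMR
+ * slots (idx and idx + 1) to cover the lower and upper 32 bits, which is
+ * why callers advance their index by two per mapping.
+ */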
diff --git a/drivers/pci/controller/pcie-rcar.h b/drivers/pci/controller/pcie-rcar.h
new file mode 100644
index 000000000..6799f8139
--- /dev/null
+++ b/drivers/pci/controller/pcie-rcar.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
+ *
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ */
+
+#ifndef _PCIE_RCAR_H
+#define _PCIE_RCAR_H
+
+#define PCIECAR 0x000010
+#define PCIECCTLR 0x000018
+#define PCIECCTLR_CCIE BIT(31)
+#define TYPE0 (0 << 8)
+#define TYPE1 BIT(8)
+#define PCIECDR 0x000020
+#define PCIEMSR 0x000028
+#define PCIEINTXR 0x000400
+#define ASTINTX BIT(16)
+#define PCIEPHYSR 0x0007f0
+#define PHYRDY BIT(0)
+#define PCIEMSITXR 0x000840
+
+/* Transfer control */
+#define PCIETCTLR 0x02000
+#define DL_DOWN BIT(3)
+#define CFINIT BIT(0)
+#define PCIETSTR 0x02004
+#define DATA_LINK_ACTIVE BIT(0)
+#define PCIEERRFR 0x02020
+#define UNSUPPORTED_REQUEST BIT(4)
+#define PCIEMSIFR 0x02044
+#define PCIEMSIALR 0x02048
+#define MSIFE BIT(0)
+#define PCIEMSIAUR 0x0204c
+#define PCIEMSIIER 0x02050
+
+/* root port address */
+#define PCIEPRAR(x) (0x02080 + ((x) * 0x4))
+
+/* local address reg & mask */
+#define PCIELAR(x) (0x02200 + ((x) * 0x20))
+#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
+#define LAM_PREFETCH BIT(3)
+#define LAM_64BIT BIT(2)
+#define LAR_ENABLE BIT(1)
+
+/* PCIe address reg & mask */
+#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
+#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
+#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
+#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
+#define PAR_ENABLE BIT(31)
+#define IO_SPACE BIT(8)
+
+/* Configuration */
+#define PCICONF(x) (0x010000 + ((x) * 0x4))
+#define INTDIS BIT(10)
+#define PMCAP(x) (0x010040 + ((x) * 0x4))
+#define MSICAP(x) (0x010050 + ((x) * 0x4))
+#define MSICAP0_MSIE BIT(16)
+#define MSICAP0_MMESCAP_OFFSET 17
+#define MSICAP0_MMESE_OFFSET 20
+#define MSICAP0_MMESE_MASK GENMASK(22, 20)
+#define EXPCAP(x) (0x010070 + ((x) * 0x4))
+#define VCCAP(x) (0x010100 + ((x) * 0x4))
+
+/* link layer */
+#define IDSETR0 0x011000
+#define IDSETR1 0x011004
+#define SUBIDSETR 0x011024
+#define TLCTLR 0x011048
+#define MACSR 0x011054
+#define SPCHGFIN BIT(4)
+#define SPCHGFAIL BIT(6)
+#define SPCHGSUC BIT(7)
+#define LINK_SPEED (0xf << 16)
+#define LINK_SPEED_2_5GTS (1 << 16)
+#define LINK_SPEED_5_0GTS (2 << 16)
+#define MACCTLR 0x011058
+#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */
+#define SPEED_CHANGE BIT(24)
+#define SCRAMBLE_DISABLE BIT(27)
+#define LTSMDIS BIT(31)
+#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
+#define PMSR 0x01105c
+#define L1FAEG BIT(31)
+#define PMEL1RX BIT(23)
+#define PMSTATE GENMASK(18, 16)
+#define PMSTATE_L1 (3 << 16)
+#define PMCTLR 0x011060
+#define L1IATN BIT(31)
+
+#define MACS2R 0x011078
+#define MACCGSPSETR 0x011084
+#define SPCNGRSN BIT(31)
+
+/* R-Car H1 PHY */
+#define H1_PCIEPHYADRR 0x04000c
+#define WRITE_CMD BIT(16)
+#define PHY_ACK BIT(24)
+#define RATE_POS 12
+#define LANE_POS 8
+#define ADR_POS 0
+#define H1_PCIEPHYDOUTR 0x040014
+
+/* R-Car Gen2 PHY */
+#define GEN2_PCIEPHYADDR 0x780
+#define GEN2_PCIEPHYDATA 0x784
+#define GEN2_PCIEPHYCTRL 0x78c
+
+#define INT_PCI_MSI_NR 32
+
+#define RCONF(x) (PCICONF(0) + (x))
+#define RPMCAP(x) (PMCAP(0) + (x))
+#define REXPCAP(x) (EXPCAP(0) + (x))
+#define RVCCAP(x) (VCCAP(0) + (x))
+
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
+
+#define RCAR_PCI_MAX_RESOURCES 4
+#define MAX_NR_INBOUND_MAPS 6
+
+struct rcar_pcie {
+ struct device *dev;
+ void __iomem *base;
+};
+
+enum {
+ RCAR_PCI_ACCESS_READ,
+ RCAR_PCI_ACCESS_WRITE,
+};
+
+void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val, unsigned int reg);
+u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg);
+void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data);
+int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie);
+int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie);
+void rcar_pcie_set_outbound(struct rcar_pcie *pcie, int win,
+ struct resource_entry *window);
+void rcar_pcie_set_inbound(struct rcar_pcie *pcie, u64 cpu_addr,
+ u64 pci_addr, u64 flags, int idx, bool host);
+
+#endif
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
new file mode 100644
index 000000000..0af0e965f
--- /dev/null
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe endpoint controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/configfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/platform_device.h>
+#include <linux/pci-epf.h>
+#include <linux/sizes.h>
+
+#include "pcie-rockchip.h"
+
+/**
+ * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
+ * @rockchip: Rockchip PCIe controller
+ * @epc: PCI EPC device
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * dedicated outbound region is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ * the sending of a memory write (MSI) / normal message (legacy
+ * IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ * the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct rockchip_pcie_ep {
+ struct rockchip_pcie rockchip;
+ struct pci_epc *epc;
+ u32 max_regions;
+ unsigned long ob_region_map;
+ phys_addr_t *ob_addr;
+ phys_addr_t irq_phys_addr;
+ void __iomem *irq_cpu_addr;
+ u64 irq_pci_addr;
+ u8 irq_pci_fn;
+ u8 irq_pending;
+};
+
+static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
+ u32 region)
+{
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
+}
+
+static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
+ u32 r, u64 cpu_addr, u64 pci_addr,
+ size_t size)
+{
+ int num_pass_bits = fls64(size - 1);
+ u32 addr0, addr1, desc0;
+
+ if (num_pass_bits < 8)
+ num_pass_bits = 8;
+
+ addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+ (lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+ addr1 = upper_32_bits(pci_addr);
+ desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE;
+
+ /* PCI bus address region */
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+ rockchip_pcie_write(rockchip, desc0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+}
+
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
+ struct pci_epf_header *hdr)
+{
+ u32 reg;
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ /* All functions share the same vendor ID with function 0 */
+ if (fn == 0) {
+ u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
+ ((hdr->subsys_vendor_id & GENMASK(15, 0)) << 16);
+
+ rockchip_pcie_write(rockchip, vid_regs,
+ PCIE_CORE_CONFIG_VENDOR);
+ }
+
+ reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
+ reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
+ rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);
+
+ rockchip_pcie_write(rockchip,
+ hdr->revid |
+ hdr->progif_code << 8 |
+ hdr->subclass_code << 16 |
+ hdr->baseclass_code << 24,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
+ rockchip_pcie_write(rockchip, hdr->cache_line_size,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_CACHE_LINE_SIZE);
+ rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_SUBSYSTEM_VENDOR_ID);
+ rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_INTERRUPT_LINE);
+
+ return 0;
+}
+
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ dma_addr_t bar_phys = epf_bar->phys_addr;
+ enum pci_barno bar = epf_bar->barno;
+ int flags = epf_bar->flags;
+ u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+ u64 sz;
+
+ /* BAR size is 2^(aperture + 7) */
+ sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);
+
+ /*
+ * roundup_pow_of_two() returns an unsigned long, which is not suited
+ * for 64-bit values.
+ */
+ sz = 1ULL << fls64(sz - 1);
+ aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
+
+ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
+ } else {
+ bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
+ bool is_64bits = sz > SZ_2G;
+
+ if (is_64bits && (bar & 1))
+ return -EINVAL;
+
+ if (is_64bits && is_prefetch)
+ ctrl =
+ ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
+ else if (is_prefetch)
+ ctrl =
+ ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
+ else if (is_64bits)
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
+ else
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
+ }
+
+ if (bar < BAR_4) {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+
+ addr0 = lower_32_bits(bar_phys);
+ addr1 = upper_32_bits(bar_phys);
+
+ cfg = rockchip_pcie_read(rockchip, reg);
+ cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+
+ rockchip_pcie_write(rockchip, cfg, reg);
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+
+ return 0;
+}
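+
+/*
+ * Aperture example: a 1 MiB BAR request rounds to sz = 1 << 20, so the
+ * aperture field is ilog2(sz) - 7 = 13 and the hardware decodes
+ * 2^(13 + 7) = 1 MiB, matching the "BAR size is 2^(aperture + 7)" rule
+ * above.
+ */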
+
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 reg, cfg, b, ctrl;
+ enum pci_barno bar = epf_bar->barno;
+
+ if (bar < BAR_4) {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
+ cfg = rockchip_pcie_read(rockchip, reg);
+ cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+
+ rockchip_pcie_write(rockchip, cfg, reg);
+ rockchip_pcie_write(rockchip, 0x0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+ rockchip_pcie_write(rockchip, 0x0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+}
+
+static inline u32 rockchip_ob_region(phys_addr_t addr)
+{
+ return (addr >> ilog2(SZ_1M)) & 0x1f;
+}
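+
+/*
+ * The outbound window is carved into 1 MiB regions, so bits 24:20 of the
+ * address select the region. Assuming the outbound memory base is 32 MiB
+ * aligned, an address of base + 5 MiB falls in region 5.
+ */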
+
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u64 pci_addr,
+ size_t size)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *pcie = &ep->rockchip;
+ u32 r = rockchip_ob_region(addr);
+
+ rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);
+
+ set_bit(r, &ep->ob_region_map);
+ ep->ob_addr[r] = addr;
+
+ return 0;
+}
+
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 r;
+
+ for (r = 0; r < ep->max_regions; r++)
+ if (ep->ob_addr[r] == addr)
+ break;
+
+ if (r == ep->max_regions)
+ return;
+
+ rockchip_pcie_clear_ep_ob_atu(rockchip, r);
+
+ ep->ob_addr[r] = 0;
+ clear_bit(r, &ep->ob_region_map);
+}
+
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
+ u8 multi_msg_cap)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 flags;
+
+ flags = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
+ flags |=
+ (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+ (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
+ flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
+ rockchip_pcie_write(rockchip, flags,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ return 0;
+}
+
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 flags;
+
+ flags = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+ return -EINVAL;
+
+ return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+}
+
+static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx, bool do_assert)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ intx &= 3;
+
+ if (do_assert) {
+ ep->irq_pending |= BIT(intx);
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_INT_IN_ASSERT |
+ PCIE_CLIENT_INT_PEND_ST_PEND,
+ PCIE_CLIENT_LEGACY_INT_CTRL);
+ } else {
+ ep->irq_pending &= ~BIT(intx);
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_INT_IN_DEASSERT |
+ PCIE_CLIENT_INT_PEND_ST_NORMAL,
+ PCIE_CLIENT_LEGACY_INT_CTRL);
+ }
+}
+
+static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx)
+{
+ u16 cmd;
+
+ cmd = rockchip_pcie_read(&ep->rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_CMD_STATUS);
+
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ return -EINVAL;
+
+ /*
+ * The TRM vaguely states that a delay of some AHB bus clock cycles is
+ * needed between asserting and deasserting INTx, so wait a generous
+ * 1ms here.
+ */
+ rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
+ mdelay(1);
+ rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
+ return 0;
+}
+
+static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 interrupt_num)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 flags, mme, data, data_mask;
+ u8 msi_count;
+ u64 pci_addr;
+ u32 r;
+
+ /* Check MSI enable bit */
+ flags = rockchip_pcie_read(&ep->rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+ return -EINVAL;
+
+ /* Get MSI numbers from MME */
+ mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+ msi_count = 1 << mme;
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ /* Set MSI private data */
+ data_mask = msi_count - 1;
+ data = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_DATA_64);
+ data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
+
+ /* Get MSI PCI address */
+ pci_addr = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_ADDRESS_HI);
+ pci_addr <<= 32;
+ pci_addr |= rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_ADDRESS_LO);
+
+ /* Set the outbound region if needed. */
+ if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) ||
+ ep->irq_pci_fn != fn)) {
+ r = rockchip_ob_region(ep->irq_phys_addr);
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
+ ep->irq_phys_addr,
+ pci_addr & PCIE_ADDR_MASK,
+ ~PCIE_ADDR_MASK + 1);
+ ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK);
+ ep->irq_pci_fn = fn;
+ }
+
+ writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK));
+ return 0;
+}
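+
+/*
+ * For example, with MME = 2 the host has granted msi_count = 4 vectors,
+ * so data_mask = 3 and raising interrupt_num = 3 merges (3 - 1) & 3 = 2
+ * into the low bits of the configured MSI data before the write.
+ */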
+
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
+ case PCI_EPC_IRQ_MSI:
+ return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rockchip_pcie_ep_start(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct pci_epf *epf;
+ u32 cfg;
+
+ cfg = BIT(0);
+ list_for_each_entry(epf, &epc->pci_epf, list)
+ cfg |= BIT(epf->func_no);
+
+ rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
+
+ return 0;
+}
+
+static const struct pci_epc_features rockchip_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 256,
+};
+
+static const struct pci_epc_features*
+rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ return &rockchip_pcie_epc_features;
+}
+
+static const struct pci_epc_ops rockchip_pcie_epc_ops = {
+ .write_header = rockchip_pcie_ep_write_header,
+ .set_bar = rockchip_pcie_ep_set_bar,
+ .clear_bar = rockchip_pcie_ep_clear_bar,
+ .map_addr = rockchip_pcie_ep_map_addr,
+ .unmap_addr = rockchip_pcie_ep_unmap_addr,
+ .set_msi = rockchip_pcie_ep_set_msi,
+ .get_msi = rockchip_pcie_ep_get_msi,
+ .raise_irq = rockchip_pcie_ep_raise_irq,
+ .start = rockchip_pcie_ep_start,
+ .get_features = rockchip_pcie_ep_get_features,
+};
+
+static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
+ struct rockchip_pcie_ep *ep)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ err = rockchip_pcie_parse_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_get_phys(rockchip);
+ if (err)
+ return err;
+
+ err = of_property_read_u32(dev->of_node,
+ "rockchip,max-outbound-regions",
+ &ep->max_regions);
+ if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
+ ep->max_regions = MAX_REGION_LIMIT;
+
+ ep->ob_region_map = 0;
+
+ err = of_property_read_u8(dev->of_node, "max-functions",
+ &ep->epc->max_functions);
+ if (err < 0)
+ ep->epc->max_functions = 1;
+
+ return 0;
+}
+
+static const struct of_device_id rockchip_pcie_ep_of_match[] = {
+ { .compatible = "rockchip,rk3399-pcie-ep"},
+ {},
+};
+
+static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie_ep *ep;
+ struct rockchip_pcie *rockchip;
+ struct pci_epc *epc;
+ size_t max_regions;
+ struct pci_epc_mem_window *windows = NULL;
+ int err, i;
+ u32 cfg_msi, cfg_msix_cp;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ rockchip = &ep->rockchip;
+ rockchip->is_rc = false;
+ rockchip->dev = dev;
+
+ epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ err = rockchip_pcie_parse_ep_dt(rockchip, ep);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ goto err_disable_clocks;
+
+ /* Establish the link automatically */
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ max_regions = ep->max_regions;
+ ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr),
+ GFP_KERNEL);
+
+ if (!ep->ob_addr) {
+ err = -ENOMEM;
+ goto err_uninit_port;
+ }
+
+ /* Only enable function 0 by default */
+ rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+
+ windows = devm_kcalloc(dev, ep->max_regions,
+ sizeof(struct pci_epc_mem_window), GFP_KERNEL);
+ if (!windows) {
+ err = -ENOMEM;
+ goto err_uninit_port;
+ }
+ for (i = 0; i < ep->max_regions; i++) {
+ windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
+ windows[i].size = SZ_1M;
+ windows[i].page_size = SZ_1M;
+ }
+ err = pci_epc_multi_mem_init(epc, windows, ep->max_regions);
+ devm_kfree(dev, windows);
+
+ if (err < 0) {
+ dev_err(dev, "failed to initialize the memory space\n");
+ goto err_uninit_port;
+ }
+
+ ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+ SZ_1M);
+ if (!ep->irq_cpu_addr) {
+ dev_err(dev, "failed to reserve memory space for MSI\n");
+ err = -ENOMEM;
+ goto err_epc_mem_exit;
+ }
+
+ ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+
+ /*
+ * MSI-X is not supported but the controller still advertises the MSI-X
+ * capability by default, which can lead to the Root Complex side
+ * allocating MSI-X vectors which cannot be used. Avoid this by skipping
+ * the MSI-X capability entry in the PCIe capabilities linked-list: get
+ * the next pointer from the MSI-X entry and set that in the MSI
+ * capability entry (which is the previous entry). This way the MSI-X
+ * entry is skipped (left out of the linked-list) and not advertised.
+ */
+ cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+
+ cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK;
+
+ cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
+ ROCKCHIP_PCIE_EP_MSIX_CAP_REG) &
+ ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK;
+
+ cfg_msi |= cfg_msix_cp;
+
+ rockchip_pcie_write(rockchip, cfg_msi,
+ PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
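+ /*
+ * For example, if the capability list reads MSI -> MSI-X -> PM, the
+ * splice above rewrites the MSI entry's next pointer to point at PM,
+ * so enumeration sees MSI -> PM and never discovers the MSI-X entry.
+ */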
+
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ return 0;
+err_epc_mem_exit:
+ pci_epc_mem_exit(epc);
+err_uninit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_disable_clocks:
+ rockchip_pcie_disable_clocks(rockchip);
+ return err;
+}
+
+static struct platform_driver rockchip_pcie_ep_driver = {
+ .driver = {
+ .name = "rockchip-pcie-ep",
+ .of_match_table = rockchip_pcie_ep_of_match,
+ },
+ .probe = rockchip_pcie_ep_probe,
+};
+
+builtin_platform_driver(rockchip_pcie_ep_driver);
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
new file mode 100644
index 000000000..afbbdccd1
--- /dev/null
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe host controller driver
+ *
+ * Copyright (c) 2016 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Wenrui Li <wenrui.li@rock-chips.com>
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#include "../pci.h"
+#include "pcie-rockchip.h"
+
+static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+
+ /* Update Tx credit maximum update interval */
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
+ val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
+ val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
+ rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
+}
+
+static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, int dev)
+{
+ /*
+	 * Access only one slot on each root port: the root bus and the
+	 * bus directly attached to the RC's downstream side each expose
+	 * only the device in slot 0.
+ */
+ if (pci_is_root_bus(bus) || pci_is_root_bus(bus->parent))
+ return dev == 0;
+
+ return 1;
+}
+
+static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+ u8 map;
+
+ if (rockchip->legacy_phy)
+ return GENMASK(MAX_LANE_NUM - 1, 0);
+
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
+ map = val & PCIE_CORE_LANE_MAP_MASK;
+
+ /* The link may be using a reverse-indexed mapping. */
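+	/* e.g. a raw map of 0b1100 becomes bitrev8(0x0c) >> 4 = 0b0011. */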
+ if (val & PCIE_CORE_LANE_MAP_REVERSE)
+ map = bitrev8(map) >> 4;
+
+ return map;
+}
+
+static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 4) {
+ *val = readl(addr);
+ } else if (size == 2) {
+ *val = readw(addr);
+ } else if (size == 1) {
+ *val = readb(addr);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 val)
+{
+ u32 mask, tmp, offset;
+ void __iomem *addr;
+
+ offset = where & ~0x3;
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
+
+ if (size == 4) {
+ writel(val, addr);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+
+ /*
+ * N.B. This read/modify/write isn't safe in general because it can
+ * corrupt RW1C bits in adjacent registers. But the hardware
+ * doesn't support smaller writes.
+ */
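+	/*
+	 * e.g. a 1-byte write at where = 0x41 yields
+	 * mask = ~(0xff << 8) = 0xffff00ff, so only bits 15:8 of the
+	 * aligned dword are replaced below.
+	 */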
+ tmp = readl(addr) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+
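+	/* Standard ECAM layout: bus << 20 | devfn << 12 | where. */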
+ addr = rockchip->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (pci_is_root_bus(bus->parent))
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ else
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE1_CFG);
+
+ if (size == 4) {
+ *val = readl(addr);
+ } else if (size == 2) {
+ *val = readw(addr);
+ } else if (size == 1) {
+ *val = readb(addr);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *addr;
+
+ addr = rockchip->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (pci_is_root_bus(bus->parent))
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ else
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE1_CFG);
+
+ if (size == 4)
+ writel(val, addr);
+ else if (size == 2)
+ writew(val, addr);
+ else if (size == 1)
+ writeb(val, addr);
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (pci_is_root_bus(bus))
+ return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size,
+ val);
+}
+
+static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (pci_is_root_bus(bus))
+ return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size,
+ val);
+}
+
+static struct pci_ops rockchip_pcie_ops = {
+ .read = rockchip_pcie_rd_conf,
+ .write = rockchip_pcie_wr_conf,
+};
+
+static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+{
+ int curr;
+ u32 status, scale, power;
+
+ if (IS_ERR(rockchip->vpcie3v3))
+ return;
+
+ /*
+	 * Set the RC's captured slot power limit and scale if vpcie3v3 is
+	 * available. Both default to zero, which means software must set
+	 * them according to the actual power supply.
+ */
+ curr = regulator_get_current_limit(rockchip->vpcie3v3);
+ if (curr <= 0)
+ return;
+
+ scale = 3; /* 0.001x */
+ curr = curr / 1000; /* convert to mA */
+ power = (curr * 3300) / 1000; /* milliwatt */
+ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ if (!scale) {
+ dev_warn(rockchip->dev, "invalid power supply\n");
+ return;
+ }
+ scale--;
+ power = power / 10;
+ }
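+	/*
+	 * e.g. a 3 A limit: 3000 mA * 3.3 V = 9900 mW > 255, so the loop
+	 * settles on scale = 1 (0.1x) and power = 99, i.e. 9.9 W.
+	 */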
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+ status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+ (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+}
+
+/**
+ * rockchip_pcie_host_init_port - Initialize hardware
+ * @rockchip: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err, i = MAX_LANE_NUM;
+ u32 status;
+
+ gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ return err;
+
+	/* Set the transmitted FTS (fast training sequence) count needed to exit L0s. */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
+ status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+ (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+
+ rockchip_pcie_set_power_limit(rockchip);
+
+ /* Set RC's clock architecture as common clock */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKSTA_SLC << 16;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+	/* Set RC's RCB (read completion boundary) to 128 bytes */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RCB;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ /* Enable Gen1 training */
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
+
+ /* 500ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
+ status, PCIE_LINK_UP(status), 20,
+ 500 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(dev, "PCIe link training gen1 timeout!\n");
+ goto err_power_off_phy;
+ }
+
+ if (rockchip->link_gen == 2) {
+ /*
+		 * Enable link retraining for gen2. This must be configured
+		 * only after gen1 training has finished.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ status, PCIE_LINK_IS_GEN2(status), 20,
+ 500 * USEC_PER_MSEC);
+ if (err)
+ dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
+ }
+
+	/* Read the final link width from the negotiated lane counter in MGMT */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+ PCIE_CORE_PL_CONF_LANE_SHIFT);
+ dev_dbg(dev, "current link width is x%d\n", status);
+
+ /* Power off unused lane(s) */
+ rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ if (!(rockchip->lanes_map & BIT(i))) {
+ dev_dbg(dev, "idling lane %d\n", i);
+ phy_power_off(rockchip->phys[i]);
+ }
+ }
+
+ rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ PCIE_CORE_CONFIG_VENDOR);
+ rockchip_pcie_write(rockchip,
+ PCI_CLASS_BRIDGE_PCI_NORMAL << 8,
+ PCIE_RC_CONFIG_RID_CCR);
+
+ /* Clear THP cap's next cap pointer to remove L1 substate cap */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
+ status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
+
+ /* Clear L0s from RC's link cap */
+ if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
+ status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
+ }
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
+ status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+ status |= PCIE_RC_CONFIG_DCSR_MPS_256;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
+
+ return 0;
+err_power_off_phy:
+ while (i--)
+ phy_power_off(rockchip->phys[i]);
+ i = MAX_LANE_NUM;
+ while (i--)
+ phy_exit(rockchip->phys[i]);
+ return err;
+}
+
+static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 sub_reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LOCAL) {
+ dev_dbg(dev, "local interrupt received\n");
+ sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
+ if (sub_reg & PCIE_CORE_INT_PRFPE)
+ dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFPE)
+ dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_RRPE)
+ dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_PRFO)
+ dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFO)
+ dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_RT)
+ dev_dbg(dev, "replay timer timed out\n");
+
+ if (sub_reg & PCIE_CORE_INT_RTR)
+ dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
+
+ if (sub_reg & PCIE_CORE_INT_PE)
+ dev_dbg(dev, "phy error detected on receive side\n");
+
+ if (sub_reg & PCIE_CORE_INT_MTR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_UCR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_FCE)
+ dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
+
+ if (sub_reg & PCIE_CORE_INT_CT)
+ dev_dbg(dev, "a request timed out waiting for completion\n");
+
+ if (sub_reg & PCIE_CORE_INT_UTC)
+ dev_dbg(dev, "unmapped TC error\n");
+
+ if (sub_reg & PCIE_CORE_INT_MMVC)
+ dev_dbg(dev, "MSI mask register changes\n");
+
+ rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
+ } else if (reg & PCIE_CLIENT_INT_PHY) {
+ dev_dbg(dev, "phy link changes\n");
+ rockchip_pcie_update_txcredit_mui(rockchip);
+ rockchip_pcie_clr_bw_int(rockchip);
+ }
+
+ rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
+ dev_dbg(dev, "legacy done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_MSG)
+ dev_dbg(dev, "message done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_HOT_RST)
+ dev_dbg(dev, "hot reset interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_DPA)
+ dev_dbg(dev, "dpa interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_FATAL_ERR)
+ dev_dbg(dev, "fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
+ dev_dbg(dev, "no fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_CORR_ERR)
+ dev_dbg(dev, "correctable error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_PHY)
+ dev_dbg(dev, "phy interrupt received\n");
+
+ rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
+ PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
+ PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
+ PCIE_CLIENT_INT_NFATAL_ERR |
+ PCIE_CLIENT_INT_CORR_ERR |
+ PCIE_CLIENT_INT_PHY),
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 hwirq;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
+
+ while (reg) {
+ hwirq = ffs(reg) - 1;
+ reg &= ~BIT(hwirq);
+
+ ret = generic_handle_domain_irq(rockchip->irq_domain, hwirq);
+ if (ret)
+ dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+{
+ int irq, err;
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
+ IRQF_SHARED, "pcie-sys", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe subsystem IRQ\n");
+ return err;
+ }
+
+ irq = platform_get_irq_byname(pdev, "legacy");
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq,
+ rockchip_pcie_legacy_int_handler,
+ rockchip);
+
+ irq = platform_get_irq_byname(pdev, "client");
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
+ IRQF_SHARED, "pcie-client", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe client IRQ\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * rockchip_pcie_parse_host_dt - Parse Device Tree
+ * @rockchip: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ err = rockchip_pcie_parse_dt(rockchip);
+ if (err)
+ return err;
+
+ rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+ if (IS_ERR(rockchip->vpcie12v)) {
+ if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
+ return PTR_ERR(rockchip->vpcie12v);
+ dev_info(dev, "no vpcie12v regulator found\n");
+ }
+
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+ return PTR_ERR(rockchip->vpcie3v3);
+ dev_info(dev, "no vpcie3v3 regulator found\n");
+ }
+
+ rockchip->vpcie1v8 = devm_regulator_get(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8))
+ return PTR_ERR(rockchip->vpcie1v8);
+
+ rockchip->vpcie0v9 = devm_regulator_get(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9))
+ return PTR_ERR(rockchip->vpcie0v9);
+
+ return 0;
+}
+
+static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ if (!IS_ERR(rockchip->vpcie12v)) {
+ err = regulator_enable(rockchip->vpcie12v);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie12v regulator\n");
+ goto err_out;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie3v3)) {
+ err = regulator_enable(rockchip->vpcie3v3);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie3v3 regulator\n");
+ goto err_disable_12v;
+ }
+ }
+
+ err = regulator_enable(rockchip->vpcie1v8);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie1v8 regulator\n");
+ goto err_disable_3v3;
+ }
+
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ goto err_disable_1v8;
+ }
+
+ return 0;
+
+err_disable_1v8:
+ regulator_disable(rockchip->vpcie1v8);
+err_disable_3v3:
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+err_disable_12v:
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+err_out:
+ return err;
+}
+
+static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
+{
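+	/*
+	 * Hiword write-mask style: select the CLI bits in the upper half
+	 * and write zeroes in the lower half to unmask only those
+	 * interrupts.
+	 */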
+ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
+ (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
+ rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
+ PCIE_CORE_INT_MASK);
+
+ rockchip_pcie_enable_bw_int(rockchip);
+}
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct device_node *intc = of_get_next_child(dev->of_node, NULL);
+
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
+ of_node_put(intc);
+ if (!rockchip->irq_domain) {
+ dev_err(dev, "failed to get a INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
+ int region_no, int type, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ob_addr_0;
+ u32 ob_addr_1;
+ u32 ob_desc_0;
+ u32 aw_offset;
+
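+	/*
+	 * num_pass_bits is one less than the number of address bits passed
+	 * through untranslated; callers use 20 - 1 for a 1 MiB (2^20)
+	 * region, matching the 2ULL << num_pass_bits size checks below.
+	 */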
+ if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < 8)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+ if (region_no == 0) {
+ if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+ if (region_no != 0) {
+ if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+
+ aw_offset = (region_no << OB_REG_SIZE_SHIFT);
+
+ ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
+ ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
+ ob_addr_1 = upper_addr;
+ ob_desc_0 = (1 << 23 | type);
+
+ rockchip_pcie_write(rockchip, ob_addr_0,
+ PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_addr_1,
+ PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_desc_0,
+ PCIE_CORE_OB_REGION_DESC0 + aw_offset);
+ rockchip_pcie_write(rockchip, 0,
+ PCIE_CORE_OB_REGION_DESC1 + aw_offset);
+
+ return 0;
+}
+
+static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
+ int region_no, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ib_addr_0;
+ u32 ib_addr_1;
+ u32 aw_offset;
+
+ if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+
+ aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
+
+ ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
+ ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
+ ib_addr_1 = upper_addr;
+
+ rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
+
+ return 0;
+}
+
+static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
+ struct resource_entry *entry;
+ u64 pci_addr, size;
+ int offset;
+ int err;
+ int reg_no;
+
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ size = resource_size(entry->res);
+ pci_addr = entry->res->start - entry->offset;
+ rockchip->msg_bus_addr = pci_addr;
+
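+	/*
+	 * Carve the MEM window into 1 MiB outbound regions; e.g. a 64 MiB
+	 * window programs 64 regions with 20 - 1 pass bits each.
+	 */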
+ for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+ AXI_WRAPPER_MEM_WRITE,
+ 20 - 1,
+ pci_addr + (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC mem outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+ if (err) {
+ dev_err(dev, "program RC mem inbound ATU failed\n");
+ return err;
+ }
+
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ if (!entry)
+ return -ENODEV;
+
+	/* store the MEM region count as an offset so the IO outbound ATU regions follow */
+ offset = size >> 20;
+
+ size = resource_size(entry->res);
+ pci_addr = entry->res->start - entry->offset;
+
+ for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip,
+ reg_no + 1 + offset,
+ AXI_WRAPPER_IO_WRITE,
+ 20 - 1,
+ pci_addr + (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC io outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ /* assign message regions */
+ rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
+ AXI_WRAPPER_NOR_MSG,
+ 20 - 1, 0, 0);
+
+ rockchip->msg_bus_addr += ((reg_no + offset) << 20);
+ return err;
+}
+
+static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
+{
+ u32 value;
+ int err;
+
+ /* send PME_TURN_OFF message */
+ writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
+
+	/* read LTSSM and wait for the link to enter the L2 state */
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
+ value, PCIE_LINK_IS_L2(value), 20,
+ jiffies_to_usecs(5 * HZ));
+ if (err) {
+ dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_suspend_noirq(struct device *dev)
+{
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+ int ret;
+
+	/*
+	 * Disable core and client interrupts, since we don't need to ack
+	 * the PME_ACK message.
+	 */
+ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
+ PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
+ rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
+
+ ret = rockchip_pcie_wait_l2(rockchip);
+ if (ret) {
+ rockchip_pcie_enable_interrupts(rockchip);
+ return ret;
+ }
+
+ rockchip_pcie_deinit_phys(rockchip);
+
+ rockchip_pcie_disable_clocks(rockchip);
+
+ regulator_disable(rockchip->vpcie0v9);
+
+ return ret;
+}
+
+static int rockchip_pcie_resume_noirq(struct device *dev)
+{
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+ int err;
+
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ return err;
+ }
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ goto err_disable_0v9;
+
+ err = rockchip_pcie_host_init_port(rockchip);
+ if (err)
+ goto err_pcie_resume;
+
+ err = rockchip_pcie_cfg_atu(rockchip);
+ if (err)
+		goto err_deinit_port;
+
+ /* Need this to enter L1 again */
+ rockchip_pcie_update_txcredit_mui(rockchip);
+ rockchip_pcie_enable_interrupts(rockchip);
+
+ return 0;
+
+err_deinit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_pcie_resume:
+ rockchip_pcie_disable_clocks(rockchip);
+err_disable_0v9:
+ regulator_disable(rockchip->vpcie0v9);
+ return err;
+}
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct rockchip_pcie *rockchip;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ int err;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
+ if (!bridge)
+ return -ENOMEM;
+
+ rockchip = pci_host_bridge_priv(bridge);
+
+ platform_set_drvdata(pdev, rockchip);
+ rockchip->dev = dev;
+ rockchip->is_rc = true;
+
+ err = rockchip_pcie_parse_host_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_set_vpcie(rockchip);
+ if (err) {
+ dev_err(dev, "failed to set vpcie regulator\n");
+ goto err_set_vpcie;
+ }
+
+ err = rockchip_pcie_host_init_port(rockchip);
+ if (err)
+ goto err_vpcie;
+
+ err = rockchip_pcie_init_irq_domain(rockchip);
+ if (err < 0)
+ goto err_deinit_port;
+
+ err = rockchip_pcie_cfg_atu(rockchip);
+ if (err)
+ goto err_remove_irq_domain;
+
+ rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
+ if (!rockchip->msg_region) {
+ err = -ENOMEM;
+ goto err_remove_irq_domain;
+ }
+
+ bridge->sysdata = rockchip;
+ bridge->ops = &rockchip_pcie_ops;
+
+ err = rockchip_pcie_setup_irq(rockchip);
+ if (err)
+ goto err_remove_irq_domain;
+
+ rockchip_pcie_enable_interrupts(rockchip);
+
+ err = pci_host_probe(bridge);
+ if (err < 0)
+ goto err_remove_irq_domain;
+
+ return 0;
+
+err_remove_irq_domain:
+ irq_domain_remove(rockchip->irq_domain);
+err_deinit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_vpcie:
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie0v9);
+err_set_vpcie:
+ rockchip_pcie_disable_clocks(rockchip);
+ return err;
+}
+
+static void rockchip_pcie_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
+
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ irq_domain_remove(rockchip->irq_domain);
+
+ rockchip_pcie_deinit_phys(rockchip);
+
+ rockchip_pcie_disable_clocks(rockchip);
+
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie0v9);
+}
+
+static const struct dev_pm_ops rockchip_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
+ rockchip_pcie_resume_noirq)
+};
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3399-pcie", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .pm = &rockchip_pcie_pm_ops,
+ },
+ .probe = rockchip_pcie_probe,
+ .remove_new = rockchip_pcie_remove,
+};
+module_platform_driver(rockchip_pcie_driver);
+
+MODULE_AUTHOR("Rockchip Inc");
+MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
new file mode 100644
index 000000000..0ef2e622d
--- /dev/null
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe host controller driver
+ *
+ * Copyright (c) 2016 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Wenrui Li <wenrui.li@rock-chips.com>
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "../pci.h"
+#include "pcie-rockchip.h"
+
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *node = dev->of_node;
+ struct resource *regs;
+ int err;
+
+ if (rockchip->is_rc) {
+ regs = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "axi-base");
+ rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(rockchip->reg_base))
+ return PTR_ERR(rockchip->reg_base);
+ } else {
+ rockchip->mem_res =
+ platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mem-base");
+ if (!rockchip->mem_res)
+ return -EINVAL;
+ }
+
+ rockchip->apb_base =
+ devm_platform_ioremap_resource_byname(pdev, "apb-base");
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
+
+ err = rockchip_pcie_get_phys(rockchip);
+ if (err)
+ return err;
+
+ rockchip->lanes = 1;
+ err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
+ if (!err && (rockchip->lanes == 0 ||
+ rockchip->lanes == 3 ||
+ rockchip->lanes > 4)) {
+ dev_warn(dev, "invalid num-lanes, default to use one lane\n");
+ rockchip->lanes = 1;
+ }
+
+ rockchip->link_gen = of_pci_get_max_link_speed(node);
+ if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
+ rockchip->link_gen = 2;
+
+ rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
+ if (IS_ERR(rockchip->core_rst)) {
+ if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing core reset property in node\n");
+ return PTR_ERR(rockchip->core_rst);
+ }
+
+ rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
+ if (IS_ERR(rockchip->mgmt_rst)) {
+ if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_rst);
+ }
+
+ rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
+ "mgmt-sticky");
+ if (IS_ERR(rockchip->mgmt_sticky_rst)) {
+ if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt-sticky reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_sticky_rst);
+ }
+
+ rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
+ if (IS_ERR(rockchip->pipe_rst)) {
+ if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pipe reset property in node\n");
+ return PTR_ERR(rockchip->pipe_rst);
+ }
+
+ rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
+ if (IS_ERR(rockchip->pm_rst)) {
+ if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pm reset property in node\n");
+ return PTR_ERR(rockchip->pm_rst);
+ }
+
+ rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
+ if (IS_ERR(rockchip->pclk_rst)) {
+ if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pclk reset property in node\n");
+ return PTR_ERR(rockchip->pclk_rst);
+ }
+
+ rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_rst)) {
+ if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing aclk reset property in node\n");
+ return PTR_ERR(rockchip->aclk_rst);
+ }
+
+ if (rockchip->is_rc) {
+ rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->ep_gpio))
+ return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
+ "failed to get ep GPIO\n");
+ }
+
+ rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_pcie)) {
+ dev_err(dev, "aclk clock not found\n");
+ return PTR_ERR(rockchip->aclk_pcie);
+ }
+
+ rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
+ if (IS_ERR(rockchip->aclk_perf_pcie)) {
+ dev_err(dev, "aclk_perf clock not found\n");
+ return PTR_ERR(rockchip->aclk_perf_pcie);
+ }
+
+ rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
+ if (IS_ERR(rockchip->hclk_pcie)) {
+ dev_err(dev, "hclk clock not found\n");
+ return PTR_ERR(rockchip->hclk_pcie);
+ }
+
+ rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
+ if (IS_ERR(rockchip->clk_pcie_pm)) {
+ dev_err(dev, "pm clock not found\n");
+ return PTR_ERR(rockchip->clk_pcie_pm);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt);
+
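+/*
+ * Helper so readx_poll_timeout() can poll a register; it relies on the
+ * enclosing function having a local "rockchip" pointer in scope.
+ */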
+#define rockchip_pcie_read_addr(addr) rockchip_pcie_read(rockchip, addr)
+/* 100 ms max wait time for PHY PLLs to lock */
+#define RK_PHY_PLL_LOCK_TIMEOUT_US 100000
+/* Sleep should be less than 20ms */
+#define RK_PHY_PLL_LOCK_SLEEP_US 1000
+
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err, i;
+ u32 regs;
+
+ err = reset_control_assert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "assert aclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "assert pclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "assert pm_rst err %d\n", err);
+ return err;
+ }
+
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ err = phy_init(rockchip->phys[i]);
+ if (err) {
+ dev_err(dev, "init phy%d err %d\n", i, err);
+ goto err_exit_phy;
+ }
+ }
+
+ err = reset_control_assert(rockchip->core_rst);
+ if (err) {
+ dev_err(dev, "assert core_rst err %d\n", err);
+ goto err_exit_phy;
+ }
+
+ err = reset_control_assert(rockchip->mgmt_rst);
+ if (err) {
+ dev_err(dev, "assert mgmt_rst err %d\n", err);
+ goto err_exit_phy;
+ }
+
+ err = reset_control_assert(rockchip->mgmt_sticky_rst);
+ if (err) {
+ dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
+ goto err_exit_phy;
+ }
+
+ err = reset_control_assert(rockchip->pipe_rst);
+ if (err) {
+ dev_err(dev, "assert pipe_rst err %d\n", err);
+ goto err_exit_phy;
+ }
+
+ udelay(10);
+
+ err = reset_control_deassert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "deassert pm_rst err %d\n", err);
+ goto err_exit_phy;
+ }
+
+ err = reset_control_deassert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "deassert aclk_rst err %d\n", err);
+ goto err_exit_phy;
+ }
+
+ err = reset_control_deassert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "deassert pclk_rst err %d\n", err);
+ goto err_exit_phy;
+ }
+
+ if (rockchip->link_gen == 2)
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2,
+ PCIE_CLIENT_CONFIG);
+ else
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
+ PCIE_CLIENT_CONFIG);
+
+ regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
+ PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
+
+ if (rockchip->is_rc)
+ regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
+ else
+ regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
+
+ rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG);
+
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ err = phy_power_on(rockchip->phys[i]);
+ if (err) {
+ dev_err(dev, "power on phy%d err %d\n", i, err);
+ goto err_power_off_phy;
+ }
+ }
+
+ err = readx_poll_timeout(rockchip_pcie_read_addr,
+ PCIE_CLIENT_SIDE_BAND_STATUS,
+ regs, !(regs & PCIE_CLIENT_PHY_ST),
+ RK_PHY_PLL_LOCK_SLEEP_US,
+ RK_PHY_PLL_LOCK_TIMEOUT_US);
+ if (err) {
+ dev_err(dev, "PHY PLLs could not lock, %d\n", err);
+ goto err_power_off_phy;
+ }
+
+ /*
+ * Please don't reorder the deassert sequence of the following
+ * four reset pins.
+ */
+ err = reset_control_deassert(rockchip->mgmt_sticky_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+ goto err_power_off_phy;
+ }
+
+ err = reset_control_deassert(rockchip->core_rst);
+ if (err) {
+ dev_err(dev, "deassert core_rst err %d\n", err);
+ goto err_power_off_phy;
+ }
+
+ err = reset_control_deassert(rockchip->mgmt_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_rst err %d\n", err);
+ goto err_power_off_phy;
+ }
+
+ err = reset_control_deassert(rockchip->pipe_rst);
+ if (err) {
+ dev_err(dev, "deassert pipe_rst err %d\n", err);
+ goto err_power_off_phy;
+ }
+
+ return 0;
+err_power_off_phy:
+ while (i--)
+ phy_power_off(rockchip->phys[i]);
+ i = MAX_LANE_NUM;
+err_exit_phy:
+ while (i--)
+ phy_exit(rockchip->phys[i]);
+ return err;
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_init_port);
+
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct phy *phy;
+ char *name;
+ u32 i;
+
+ phy = devm_phy_get(dev, "pcie-phy");
+ if (!IS_ERR(phy)) {
+ rockchip->legacy_phy = true;
+ rockchip->phys[0] = phy;
+ dev_warn(dev, "legacy phy model is deprecated!\n");
+ return 0;
+ }
+
+ if (PTR_ERR(phy) == -EPROBE_DEFER)
+ return PTR_ERR(phy);
+
+ dev_dbg(dev, "missing legacy phy; search for per-lane PHY\n");
+
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ name = kasprintf(GFP_KERNEL, "pcie-phy-%u", i);
+ if (!name)
+ return -ENOMEM;
+
+ phy = devm_of_phy_get(dev, dev->of_node, name);
+ kfree(name);
+
+ if (IS_ERR(phy)) {
+ if (PTR_ERR(phy) != -EPROBE_DEFER)
+ dev_err(dev, "missing phy for lane %d: %ld\n",
+ i, PTR_ERR(phy));
+ return PTR_ERR(phy);
+ }
+
+ rockchip->phys[i] = phy;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys);
+
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
+{
+ int i;
+
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ /* inactive lanes are already powered off */
+ if (rockchip->lanes_map & BIT(i))
+ phy_power_off(rockchip->phys[i]);
+ phy_exit(rockchip->phys[i]);
+ }
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys);
+
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ err = clk_prepare_enable(rockchip->aclk_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable aclk_pcie clock\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(rockchip->aclk_perf_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
+ goto err_aclk_perf_pcie;
+ }
+
+ err = clk_prepare_enable(rockchip->hclk_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable hclk_pcie clock\n");
+ goto err_hclk_pcie;
+ }
+
+ err = clk_prepare_enable(rockchip->clk_pcie_pm);
+ if (err) {
+ dev_err(dev, "unable to enable clk_pcie_pm clock\n");
+ goto err_clk_pcie_pm;
+ }
+
+ return 0;
+
+err_clk_pcie_pm:
+ clk_disable_unprepare(rockchip->hclk_pcie);
+err_hclk_pcie:
+ clk_disable_unprepare(rockchip->aclk_perf_pcie);
+err_aclk_perf_pcie:
+ clk_disable_unprepare(rockchip->aclk_pcie);
+ return err;
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
+
+void rockchip_pcie_disable_clocks(void *data)
+{
+ struct rockchip_pcie *rockchip = data;
+
+ clk_disable_unprepare(rockchip->clk_pcie_pm);
+ clk_disable_unprepare(rockchip->hclk_pcie);
+ clk_disable_unprepare(rockchip->aclk_perf_pcie);
+ clk_disable_unprepare(rockchip->aclk_pcie);
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
+
+void rockchip_pcie_cfg_configuration_accesses(
+ struct rockchip_pcie *rockchip, u32 type)
+{
+ u32 ob_desc_0;
+
+ /* Configuration Accesses for region 0 */
+ rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
+
+ rockchip_pcie_write(rockchip,
+ (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
+ PCIE_CORE_OB_REGION_ADDR0);
+ rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
+ PCIE_CORE_OB_REGION_ADDR1);
+ ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
+ ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
+ ob_desc_0 |= (type | (0x1 << 23));
+ rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
+ rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses);
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
new file mode 100644
index 000000000..6111de35f
--- /dev/null
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Rockchip AXI PCIe controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *
+ */
+
+#ifndef _PCIE_ROCKCHIP_H
+#define _PCIE_ROCKCHIP_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
+ * bits. This allows atomic updates of the register without locking.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
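+/*
+ * e.g. HIWORD_UPDATE(0x0040, 0) = 0x00400000 clears only bit 6, while
+ * HIWORD_UPDATE_BIT(0x0040) = 0x00400040 sets only bit 6.
+ */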
+
+#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
+#define MAX_LANE_NUM 4
+#define MAX_REGION_LIMIT 32
+#define MIN_EP_APERTURE 28
+
+#define PCIE_CLIENT_BASE 0x0
+#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
+#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
+#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
+#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
+#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
+#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
+#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
+#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0)
+#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
+#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_LEGACY_INT_CTRL (PCIE_CLIENT_BASE + 0x0c)
+#define PCIE_CLIENT_INT_IN_ASSERT HIWORD_UPDATE_BIT(0x0002)
+#define PCIE_CLIENT_INT_IN_DEASSERT HIWORD_UPDATE(0x0002, 0)
+#define PCIE_CLIENT_INT_PEND_ST_PEND HIWORD_UPDATE_BIT(0x0001)
+#define PCIE_CLIENT_INT_PEND_ST_NORMAL HIWORD_UPDATE(0x0001, 0)
+#define PCIE_CLIENT_SIDE_BAND_STATUS (PCIE_CLIENT_BASE + 0x20)
+#define PCIE_CLIENT_PHY_ST BIT(12)
+#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
+#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
+#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
+#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
+#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
+#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
+#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
+#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
+#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
+#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
+#define PCIE_CLIENT_INTR_SHIFT 5
+#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
+#define PCIE_CLIENT_INT_MSG BIT(14)
+#define PCIE_CLIENT_INT_HOT_RST BIT(13)
+#define PCIE_CLIENT_INT_DPA BIT(12)
+#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
+#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
+#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
+#define PCIE_CLIENT_INT_INTD BIT(8)
+#define PCIE_CLIENT_INT_INTC BIT(7)
+#define PCIE_CLIENT_INT_INTB BIT(6)
+#define PCIE_CLIENT_INT_INTA BIT(5)
+#define PCIE_CLIENT_INT_LOCAL BIT(4)
+#define PCIE_CLIENT_INT_UDMA BIT(3)
+#define PCIE_CLIENT_INT_PHY BIT(2)
+#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
+#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
+
+#define PCIE_CLIENT_INT_LEGACY \
+ (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
+ PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
+
+#define PCIE_CLIENT_INT_CLI \
+ (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
+ PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
+ PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
+ PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
+ PCIE_CLIENT_INT_PHY)
+
+#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
+#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
+#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
+#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
+#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
+#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
+#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
+#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
+#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
+#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
+ (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
+#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
+#define PCIE_CORE_LANE_MAP_MASK 0x0000000f
+#define PCIE_CORE_LANE_MAP_REVERSE BIT(16)
+#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
+#define PCIE_CORE_INT_PRFPE BIT(0)
+#define PCIE_CORE_INT_CRFPE BIT(1)
+#define PCIE_CORE_INT_RRPE BIT(2)
+#define PCIE_CORE_INT_PRFO BIT(3)
+#define PCIE_CORE_INT_CRFO BIT(4)
+#define PCIE_CORE_INT_RT BIT(5)
+#define PCIE_CORE_INT_RTR BIT(6)
+#define PCIE_CORE_INT_PE BIT(7)
+#define PCIE_CORE_INT_MTR BIT(8)
+#define PCIE_CORE_INT_UCR BIT(9)
+#define PCIE_CORE_INT_FCE BIT(10)
+#define PCIE_CORE_INT_CT BIT(11)
+#define PCIE_CORE_INT_UTC BIT(18)
+#define PCIE_CORE_INT_MMVC BIT(19)
+#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44)
+#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
+#define PCIE_CORE_PHY_FUNC_CFG (PCIE_CORE_CTRL_MGMT_BASE + 0x2c0)
+#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED 0x0
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS 0x1
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS 0x4
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS 0x6
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
+
+#define PCIE_CORE_INT \
+ (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
+ PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
+ PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
+ PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
+ PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
+ PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
+ PCIE_CORE_INT_MMVC)
+
+#define PCIE_RC_RP_ATS_BASE 0x400000
+#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
+#define PCIE_EP_PF_CONFIG_REGS_BASE 0x800000
+#define PCIE_RC_CONFIG_BASE 0xa00000
+#define PCIE_EP_CONFIG_BASE 0xa00000
+#define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00)
+#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
+#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
+#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
+#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
+#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
+#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
+#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
+#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
+#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
+#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
+#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
+#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
+
+#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
+#define MIN_AXI_ADDR_BITS_PASSED 8
+#define PCIE_ADDR_MASK GENMASK_ULL(63, MIN_AXI_ADDR_BITS_PASSED)
+#define PCIE_CORE_AXI_CONF_BASE 0xc00000
+#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
+#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR PCIE_ADDR_MASK
+#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
+#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
+#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
+
+#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
+#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
+#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR PCIE_ADDR_MASK
+#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
+
+/* Size of one AXI Region (not Region 0) */
+#define AXI_REGION_SIZE BIT(20)
+/* Size of Region 0, equal to sum of sizes of other regions */
+#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
+#define OB_REG_SIZE_SHIFT 5
+#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
+#define AXI_WRAPPER_IO_WRITE 0x6
+#define AXI_WRAPPER_MEM_WRITE 0x2
+#define AXI_WRAPPER_TYPE0_CFG 0xa
+#define AXI_WRAPPER_TYPE1_CFG 0xb
+#define AXI_WRAPPER_NOR_MSG 0xc
+
+#define PCIE_RC_SEND_PME_OFF 0x11960
+#define ROCKCHIP_VENDOR_ID 0x1d87
+#define PCIE_LINK_IS_L2(x) \
+ (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+#define PCIE_LINK_UP(x) \
+ (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
+#define PCIE_LINK_IS_GEN2(x) \
+ (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
+
+#define RC_REGION_0_ADDR_TRANS_H 0x00000000
+#define RC_REGION_0_ADDR_TRANS_L 0x00000000
+#define RC_REGION_0_PASS_BITS (25 - 1)
+#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
+#define MAX_AXI_WRAPPER_REGION_NUM 33
+
+#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2
+#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3
+#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4
+#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27
+#define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5)
+#define ROCKCHIP_PCIE_MSG_ROUTING(route) \
+ (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK)
+#define ROCKCHIP_PCIE_MSG_CODE_MASK GENMASK(15, 8)
+#define ROCKCHIP_PCIE_MSG_CODE(code) \
+ (((code) << 8) & ROCKCHIP_PCIE_MSG_CODE_MASK)
+#define ROCKCHIP_PCIE_MSG_NO_DATA BIT(16)
+
+#define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4
+#define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90
+#define ROCKCHIP_PCIE_EP_MSI_CP1_OFFSET 8
+#define ROCKCHIP_PCIE_EP_MSI_CP1_MASK GENMASK(15, 8)
+#define ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET 16
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24)
+#define ROCKCHIP_PCIE_EP_MSIX_CAP_REG 0xb0
+#define ROCKCHIP_PCIE_EP_MSIX_CAP_CP_OFFSET 8
+#define ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK GENMASK(15, 8)
+#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1
+#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3
+#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) \
+ (PCIE_EP_PF_CONFIG_REGS_BASE + (((fn) << 12) & GENMASK(19, 12)))
+#define ROCKCHIP_PCIE_EP_VIRT_FUNC_BASE(fn) \
+ (PCIE_EP_PF_CONFIG_REGS_BASE + 0x10000 + (((fn) << 12) & GENMASK(19, 12)))
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+ (PCIE_CORE_AXI_CONF_BASE + 0x0828 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+ (PCIE_CORE_AXI_CONF_BASE + 0x082c + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ (((devfn) << 12) & \
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ (((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+#define PCIE_RC_EP_ATR_OB_REGIONS_1_32 (PCIE_CORE_AXI_CONF_BASE + 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+ (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0000 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+ (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0004 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+ (((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \
+ (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0008 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \
+ (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x000c + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC2(r) \
+ (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0010 + ((r) & 0x1f) * 0x0020)
+
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \
+ (PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn) \
+ (PCIE_CORE_CTRL_MGMT_BASE + 0x0244 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+ (GENMASK(4, 0) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+ (((a) << ((b) * 8)) & \
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+ (GENMASK(7, 5) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+ (((c) << ((b) * 8 + 5)) & \
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+struct rockchip_pcie {
+ void __iomem *reg_base; /* DT axi-base */
+ void __iomem *apb_base; /* DT apb-base */
+ bool legacy_phy;
+ struct phy *phys[MAX_LANE_NUM];
+ struct reset_control *core_rst;
+ struct reset_control *mgmt_rst;
+ struct reset_control *mgmt_sticky_rst;
+ struct reset_control *pipe_rst;
+ struct reset_control *pm_rst;
+ struct reset_control *aclk_rst;
+ struct reset_control *pclk_rst;
+ struct clk *aclk_pcie;
+ struct clk *aclk_perf_pcie;
+ struct clk *hclk_pcie;
+ struct clk *clk_pcie_pm;
+ struct regulator *vpcie12v; /* 12V power supply */
+ struct regulator *vpcie3v3; /* 3.3V power supply */
+ struct regulator *vpcie1v8; /* 1.8V power supply */
+ struct regulator *vpcie0v9; /* 0.9V power supply */
+ struct gpio_desc *ep_gpio;
+ u32 lanes;
+ u8 lanes_map;
+ int link_gen;
+ struct device *dev;
+ struct irq_domain *irq_domain;
+ int offset;
+ void __iomem *msg_region;
+ phys_addr_t msg_bus_addr;
+ bool is_rc;
+ struct resource *mem_res;
+};
+
+static inline u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
+{
+ return readl(rockchip->apb_base + reg);
+}
+
+static inline void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
+{
+ writel(val, rockchip->apb_base + reg);
+}
+
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip);
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
+void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_cfg_configuration_accesses(
+ struct rockchip_pcie *rockchip, u32 type);
+
+#endif /* _PCIE_ROCKCHIP_H */
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
new file mode 100644
index 000000000..4a787a941
--- /dev/null
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
+ *
+ * (C) Copyright 2019 - 2020, Xilinx, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pci-ecam.h>
+
+#include "../pci.h"
+
+/* Register definitions */
+#define XILINX_CPM_PCIE_REG_IDR 0x00000E10
+#define XILINX_CPM_PCIE_REG_IMR 0x00000E14
+#define XILINX_CPM_PCIE_REG_PSCR 0x00000E1C
+#define XILINX_CPM_PCIE_REG_RPSC 0x00000E20
+#define XILINX_CPM_PCIE_REG_RPEFR 0x00000E2C
+#define XILINX_CPM_PCIE_REG_IDRN 0x00000E38
+#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
+#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
+#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
+#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+
+#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+
+/* Interrupt registers definitions */
+#define XILINX_CPM_PCIE_INTR_LINK_DOWN 0
+#define XILINX_CPM_PCIE_INTR_HOT_RESET 3
+#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT 4
+#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT 8
+#define XILINX_CPM_PCIE_INTR_CORRECTABLE 9
+#define XILINX_CPM_PCIE_INTR_NONFATAL 10
+#define XILINX_CPM_PCIE_INTR_FATAL 11
+#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON 12
+#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD 15
+#define XILINX_CPM_PCIE_INTR_INTX 16
+#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD 17
+#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP 20
+#define XILINX_CPM_PCIE_INTR_SLV_UNEXP 21
+#define XILINX_CPM_PCIE_INTR_SLV_COMPL 22
+#define XILINX_CPM_PCIE_INTR_SLV_ERRP 23
+#define XILINX_CPM_PCIE_INTR_SLV_CMPABT 24
+#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR 25
+#define XILINX_CPM_PCIE_INTR_MST_DECERR 26
+#define XILINX_CPM_PCIE_INTR_MST_SLVERR 27
+#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT 28
+
+#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x)
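+/* e.g. IMR(INTX) expands to BIT(XILINX_CPM_PCIE_INTR_INTX) = BIT(16). */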
+
+#define XILINX_CPM_PCIE_IMR_ALL_MASK \
+ ( \
+ IMR(LINK_DOWN) | \
+ IMR(HOT_RESET) | \
+ IMR(CFG_PCIE_TIMEOUT) | \
+ IMR(CFG_TIMEOUT) | \
+ IMR(CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ IMR(CFG_ERR_POISON) | \
+ IMR(PME_TO_ACK_RCVD) | \
+ IMR(INTX) | \
+ IMR(PM_PME_RCVD) | \
+ IMR(SLV_UNSUPP) | \
+ IMR(SLV_UNEXP) | \
+ IMR(SLV_COMPL) | \
+ IMR(SLV_ERRP) | \
+ IMR(SLV_CMPABT) | \
+ IMR(SLV_ILLBUR) | \
+ IMR(MST_DECERR) | \
+ IMR(MST_SLVERR) | \
+ IMR(SLV_PCIE_TIMEOUT) \
+ )
+
+#define XILINX_CPM_PCIE_IDR_ALL_MASK 0xFFFFFFFF
+#define XILINX_CPM_PCIE_IDRN_MASK GENMASK(19, 16)
+#define XILINX_CPM_PCIE_IDRN_SHIFT 16
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_CPM_PCIE_RPEFR_ERR_VALID BIT(18)
+#define XILINX_CPM_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_CPM_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Status/control Register definitions */
+#define XILINX_CPM_PCIE_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
+
+enum xilinx_cpm_version {
+ CPM,
+ CPM5,
+};
+
+/**
+ * struct xilinx_cpm_variant - CPM variant information
+ * @version: CPM version
+ */
+struct xilinx_cpm_variant {
+ enum xilinx_cpm_version version;
+};
+
+/**
+ * struct xilinx_cpm_pcie - PCIe port information
+ * @dev: Device pointer
+ * @reg_base: Bridge Register Base
+ * @cpm_base: CPM System Level Control and Status Register(SLCR) Base
+ * @intx_domain: Legacy IRQ domain pointer
+ * @cpm_domain: CPM IRQ domain pointer
+ * @cfg: Holds mappings of config space window
+ * @intx_irq: legacy interrupt number
+ * @irq: Error interrupt number
+ * @lock: lock protecting shared register access
+ * @variant: Pointer to the matched CPM variant data
+ */
+struct xilinx_cpm_pcie {
+ struct device *dev;
+ void __iomem *reg_base;
+ void __iomem *cpm_base;
+ struct irq_domain *intx_domain;
+ struct irq_domain *cpm_domain;
+ struct pci_config_window *cfg;
+ int intx_irq;
+ int irq;
+ raw_spinlock_t lock;
+ const struct xilinx_cpm_variant *variant;
+};
+
+static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
+{
+ return readl_relaxed(port->reg_base + reg);
+}
+
+static void pcie_write(struct xilinx_cpm_pcie *port,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, port->reg_base + reg);
+}
+
+static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port)
+{
+ return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
+ XILINX_CPM_PCIE_REG_PSCR_LNKUP);
+}
+
+static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port)
+{
+ unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);
+
+ if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %lu\n",
+ val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
+ XILINX_CPM_PCIE_REG_RPEFR);
+ }
+}
+
+static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
+{
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask;
+ u32 val;
+
+ mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
+ pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
+{
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask;
+ u32 val;
+
+ mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
+ pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip xilinx_cpm_leg_irq_chip = {
+ .name = "INTx",
+ .irq_mask = xilinx_cpm_mask_leg_irq,
+ .irq_unmask = xilinx_cpm_unmask_leg_irq,
+};
+
+/**
+ * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_cpm_pcie_intx_map,
+};
+
+static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
+{
+ struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long val;
+ int i;
+
+ chained_irq_enter(chip, desc);
+
+ val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK,
+ pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));
+
+ for_each_set_bit(i, &val, PCI_NUM_INTX)
+ generic_handle_domain_irq(port->intx_domain, i);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void xilinx_cpm_mask_event_irq(struct irq_data *d)
+{
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
+ val &= ~BIT(d->hwirq);
+ pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
+ raw_spin_unlock(&port->lock);
+}
+
+static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
+{
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
+ val |= BIT(d->hwirq);
+ pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip xilinx_cpm_event_irq_chip = {
+ .name = "RC-Event",
+ .irq_mask = xilinx_cpm_mask_event_irq,
+ .irq_unmask = xilinx_cpm_unmask_event_irq,
+};
+
+static int xilinx_cpm_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = xilinx_cpm_pcie_event_map,
+};
+
+static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
+{
+ struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long val;
+ int i;
+
+ chained_irq_enter(chip, desc);
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
+ val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(port->cpm_domain, i);
+ pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
+
+ if (port->variant->version == CPM5) {
+ val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (val)
+ writel_relaxed(val, port->cpm_base +
+ XILINX_CPM_PCIE_IR_STATUS);
+ }
+
+ /*
+ * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
+ * CPM SLCR block.
+ */
+ val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
+ if (val)
+ writel_relaxed(val,
+ port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
+
+ chained_irq_exit(chip, desc);
+}
+
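+/*
+ * _IC(LINK_DOWN, "Link Down") expands to
+ * [XILINX_CPM_PCIE_INTR_LINK_DOWN] = { "LINK_DOWN", "Link Down" },
+ * pairing each hardware event bit with its symbol and log message.
+ */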
+#define _IC(x, s) \
+ [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(LINK_DOWN, "Link Down"),
+ _IC(HOT_RESET, "Hot reset"),
+ _IC(CFG_TIMEOUT, "ECAM access timeout"),
+ _IC(CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+ _IC(SLV_UNSUPP, "Slave unsupported request"),
+ _IC(SLV_UNEXP, "Slave unexpected completion"),
+ _IC(SLV_COMPL, "Slave completion timeout"),
+ _IC(SLV_ERRP, "Slave Error Poison"),
+ _IC(SLV_CMPABT, "Slave Completer Abort"),
+ _IC(SLV_ILLBUR, "Slave Illegal Burst"),
+ _IC(MST_DECERR, "Master decode error"),
+ _IC(MST_SLVERR, "Master slave error"),
+ _IC(CFG_PCIE_TIMEOUT, "PCIe ECAM access timeout"),
+ _IC(CFG_ERR_POISON, "ECAM poisoned completion received"),
+ _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
+ _IC(PM_PME_RCVD, "PM_PME message received"),
+ _IC(SLV_PCIE_TIMEOUT, "PCIe completion timeout received"),
+};
+
+static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
+{
+ struct xilinx_cpm_pcie *port = dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *d;
+
+ d = irq_domain_get_irq_data(port->cpm_domain, irq);
+
+ switch (d->hwirq) {
+ case XILINX_CPM_PCIE_INTR_CORRECTABLE:
+ case XILINX_CPM_PCIE_INTR_NONFATAL:
+ case XILINX_CPM_PCIE_INTR_FATAL:
+ cpm_pcie_clear_err_interrupts(port);
+ fallthrough;
+
+ default:
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port)
+{
+ if (port->intx_domain) {
+ irq_domain_remove(port->intx_domain);
+ port->intx_domain = NULL;
+ }
+
+ if (port->cpm_domain) {
+ irq_domain_remove(port->cpm_domain);
+ port->cpm_domain = NULL;
+ }
+}
+
+/**
+ * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -EINVAL;
+ }
+
+ port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
+ &event_domain_ops,
+ port);
+ if (!port->cpm_domain)
+ goto out;
+
+ irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops,
+ port);
+ if (!port->intx_domain)
+ goto out;
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return 0;
+out:
+ xilinx_cpm_free_irq_domains(port);
+ of_node_put(pcie_intc_node);
+ dev_err(dev, "Failed to allocate IRQ domains\n");
+
+ return -ENOMEM;
+}
+
+static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int i, irq;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ int err;
+
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(port->cpm_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler,
+ 0, intr_cause[i].sym, port);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d\n", irq);
+ return err;
+ }
+ }
+
+ port->intx_irq = irq_create_mapping(port->cpm_domain,
+ XILINX_CPM_PCIE_INTR_INTX);
+ if (!port->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ /* Plug the INTx chained handler */
+ irq_set_chained_handler_and_data(port->intx_irq,
+ xilinx_cpm_pcie_intx_flow, port);
+
+ /* Plug the main event chained handler */
+ irq_set_chained_handler_and_data(port->irq,
+ xilinx_cpm_pcie_event_flow, port);
+
+ return 0;
+}
+
+/**
+ * xilinx_cpm_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ */
+static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
+{
+ if (cpm_pcie_link_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
+ XILINX_CPM_PCIE_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
+ XILINX_CPM_PCIE_IMR_ALL_MASK,
+ XILINX_CPM_PCIE_REG_IDR);
+
+ /*
+ * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
+ * CPM SLCR block.
+ */
+ writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+ port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
+
+ if (port->variant->version == CPM5) {
+ writel(XILINX_CPM_PCIE_IR_LOCAL,
+ port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ }
+
+ /* Enable the Bridge enable bit */
+ pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
+ XILINX_CPM_PCIE_REG_RPSC_BEN,
+ XILINX_CPM_PCIE_REG_RPSC);
+}
+
+/**
+ * xilinx_cpm_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ * @bus_range: Bus resource
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
+ struct resource *bus_range)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ port->cpm_base = devm_platform_ioremap_resource_byname(pdev,
+ "cpm_slcr");
+ if (IS_ERR(port->cpm_base))
+ return PTR_ERR(port->cpm_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!res)
+ return -ENXIO;
+
+ port->cfg = pci_ecam_create(dev, res, bus_range,
+ &pci_generic_ecam_ops);
+ if (IS_ERR(port->cfg))
+ return PTR_ERR(port->cfg);
+
+ if (port->variant->version == CPM5) {
+ port->reg_base = devm_platform_ioremap_resource_byname(pdev,
+ "cpm_csr");
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+ } else {
+ port->reg_base = port->cfg->win;
+ }
+
+ return 0;
+}
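+
+/*
+ * Note on the two register layouts above: CPM5 exposes the bridge
+ * registers in a dedicated "cpm_csr" region, while plain CPM places
+ * them at the start of the ECAM window, so cfg->win is reused.
+ */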
+
+static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port)
+{
+ irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
+ irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+}
+
+/**
+ * xilinx_cpm_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
+{
+ struct xilinx_cpm_pcie *port;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *bus;
+ int err;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENODEV;
+
+ port = pci_host_bridge_priv(bridge);
+
+ port->dev = dev;
+
+ err = xilinx_cpm_pcie_init_irq_domain(port);
+ if (err)
+ return err;
+
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ port->variant = of_device_get_match_data(dev);
+
+ err = xilinx_cpm_pcie_parse_dt(port, bus->res);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ goto err_parse_dt;
+ }
+
+ xilinx_cpm_pcie_init_port(port);
+
+ err = xilinx_cpm_setup_irq(port);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts\n");
+ goto err_setup_irq;
+ }
+
+ bridge->sysdata = port->cfg;
+ bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
+
+ err = pci_host_probe(bridge);
+ if (err < 0)
+ goto err_host_bridge;
+
+ return 0;
+
+err_host_bridge:
+ xilinx_cpm_free_interrupts(port);
+err_setup_irq:
+ pci_ecam_free(port->cfg);
+err_parse_dt:
+ xilinx_cpm_free_irq_domains(port);
+ return err;
+}
+
+static const struct xilinx_cpm_variant cpm_host = {
+ .version = CPM,
+};
+
+static const struct xilinx_cpm_variant cpm5_host = {
+ .version = CPM5,
+};
+
+static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
+ {
+ .compatible = "xlnx,versal-cpm-host-1.00",
+ .data = &cpm_host,
+ },
+ {
+ .compatible = "xlnx,versal-cpm5-host",
+ .data = &cpm5_host,
+ },
+ {}
+};
+
+static struct platform_driver xilinx_cpm_pcie_driver = {
+ .driver = {
+ .name = "xilinx-cpm-pcie",
+ .of_match_table = xilinx_cpm_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_cpm_pcie_probe,
+};
+
+builtin_platform_driver(xilinx_cpm_pcie_driver);
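
The event and INTx flows above share the canonical chained-demux shape:
read a latched status word, AND it with the enable mask, dispatch each
set bit, then acknowledge. A compilable userspace sketch of that inner
loop follows; demux_once and the handlers table are illustrative names,
not kernel API, and __builtin_ctz assumes GCC or Clang.

#include <stdio.h>

/* Toy per-bit dispatch table; indices mirror the hwirq numbers. */
static void link_down(void) { puts("Link Down"); }
static void hot_reset(void) { puts("Hot reset"); }

static void (*handlers[32])(void) = {
	[0] = link_down,	/* XILINX_CPM_PCIE_INTR_LINK_DOWN */
	[3] = hot_reset,	/* XILINX_CPM_PCIE_INTR_HOT_RESET */
};

/* One pass of the loop at the heart of xilinx_cpm_pcie_event_flow(). */
static void demux_once(unsigned int status, unsigned int mask)
{
	unsigned int pending = status & mask;

	while (pending) {
		int bit = __builtin_ctz(pending);	/* lowest set bit */

		if (handlers[bit])
			handlers[bit]();
		pending &= pending - 1;	/* clear that bit */
	}
}

int main(void)
{
	demux_once(0x9, ~0U);	/* bits 0 and 3 pending: both fire */
	return 0;
}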
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
new file mode 100644
index 000000000..176686bdb
--- /dev/null
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for NWL PCIe Bridge
+ * Based on pcie-xilinx.c, pci-tegra.c
+ *
+ * (C) Copyright 2014 - 2015, Xilinx, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include <linux/irqchip/chained_irq.h>
+
+#include "../pci.h"
+
+/* Bridge core config registers */
+#define BRCFG_PCIE_RX0 0x00000000
+#define BRCFG_PCIE_RX1 0x00000004
+#define BRCFG_INTERRUPT 0x00000010
+#define BRCFG_PCIE_RX_MSG_FILTER 0x00000020
+
+/* Egress - Bridge translation registers */
+#define E_BREG_CAPABILITIES 0x00000200
+#define E_BREG_CONTROL 0x00000208
+#define E_BREG_BASE_LO 0x00000210
+#define E_BREG_BASE_HI 0x00000214
+#define E_ECAM_CAPABILITIES 0x00000220
+#define E_ECAM_CONTROL 0x00000228
+#define E_ECAM_BASE_LO 0x00000230
+#define E_ECAM_BASE_HI 0x00000234
+
+/* Ingress - address translations */
+#define I_MSII_CAPABILITIES 0x00000300
+#define I_MSII_CONTROL 0x00000308
+#define I_MSII_BASE_LO 0x00000310
+#define I_MSII_BASE_HI 0x00000314
+
+#define I_ISUB_CONTROL 0x000003E8
+#define SET_ISUB_CONTROL BIT(0)
+/* Received message FIFO - interrupt status registers */
+#define MSGF_MISC_STATUS 0x00000400
+#define MSGF_MISC_MASK 0x00000404
+#define MSGF_LEG_STATUS 0x00000420
+#define MSGF_LEG_MASK 0x00000424
+#define MSGF_MSI_STATUS_LO 0x00000440
+#define MSGF_MSI_STATUS_HI 0x00000444
+#define MSGF_MSI_MASK_LO 0x00000448
+#define MSGF_MSI_MASK_HI 0x0000044C
+
+/* Msg filter mask bits */
+#define CFG_ENABLE_PM_MSG_FWD BIT(1)
+#define CFG_ENABLE_INT_MSG_FWD BIT(2)
+#define CFG_ENABLE_ERR_MSG_FWD BIT(3)
+#define CFG_ENABLE_MSG_FILTER_MASK (CFG_ENABLE_PM_MSG_FWD | \
+ CFG_ENABLE_INT_MSG_FWD | \
+ CFG_ENABLE_ERR_MSG_FWD)
+
+/* Misc interrupt status mask bits */
+#define MSGF_MISC_SR_RXMSG_AVAIL BIT(0)
+#define MSGF_MISC_SR_RXMSG_OVER BIT(1)
+#define MSGF_MISC_SR_SLAVE_ERR BIT(4)
+#define MSGF_MISC_SR_MASTER_ERR BIT(5)
+#define MSGF_MISC_SR_I_ADDR_ERR BIT(6)
+#define MSGF_MISC_SR_E_ADDR_ERR BIT(7)
+#define MSGF_MISC_SR_FATAL_AER BIT(16)
+#define MSGF_MISC_SR_NON_FATAL_AER BIT(17)
+#define MSGF_MISC_SR_CORR_AER BIT(18)
+#define MSGF_MISC_SR_UR_DETECT BIT(20)
+#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
+#define MSGF_MISC_SR_FATAL_DEV BIT(23)
+#define MSGF_MISC_SR_LINK_DOWN BIT(24)
+#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
+#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
+
+#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
+ MSGF_MISC_SR_RXMSG_OVER | \
+ MSGF_MISC_SR_SLAVE_ERR | \
+ MSGF_MISC_SR_MASTER_ERR | \
+ MSGF_MISC_SR_I_ADDR_ERR | \
+ MSGF_MISC_SR_E_ADDR_ERR | \
+ MSGF_MISC_SR_FATAL_AER | \
+ MSGF_MISC_SR_NON_FATAL_AER | \
+ MSGF_MISC_SR_CORR_AER | \
+ MSGF_MISC_SR_UR_DETECT | \
+ MSGF_MISC_SR_NON_FATAL_DEV | \
+ MSGF_MISC_SR_FATAL_DEV | \
+ MSGF_MISC_SR_LINK_DOWN | \
+ MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
+ MSGF_MSIC_SR_LINK_BWIDTH)
+
+/* Legacy interrupt status mask bits */
+#define MSGF_LEG_SR_INTA BIT(0)
+#define MSGF_LEG_SR_INTB BIT(1)
+#define MSGF_LEG_SR_INTC BIT(2)
+#define MSGF_LEG_SR_INTD BIT(3)
+#define MSGF_LEG_SR_MASKALL (MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \
+ MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)
+
+/* MSI interrupt status mask bits */
+#define MSGF_MSI_SR_LO_MASK GENMASK(31, 0)
+#define MSGF_MSI_SR_HI_MASK GENMASK(31, 0)
+
+#define MSII_PRESENT BIT(0)
+#define MSII_ENABLE BIT(0)
+#define MSII_STATUS_ENABLE BIT(15)
+
+/* Bridge config interrupt mask */
+#define BRCFG_INTERRUPT_MASK BIT(0)
+#define BREG_PRESENT BIT(0)
+#define BREG_ENABLE BIT(0)
+#define BREG_ENABLE_FORCE BIT(1)
+
+/* E_ECAM status mask bits */
+#define E_ECAM_PRESENT BIT(0)
+#define E_ECAM_CR_ENABLE BIT(0)
+#define E_ECAM_SIZE_LOC GENMASK(20, 16)
+#define E_ECAM_SIZE_SHIFT 16
+#define NWL_ECAM_VALUE_DEFAULT 12
+
+#define CFG_DMA_REG_BAR GENMASK(2, 0)
+#define CFG_PCIE_CACHE GENMASK(7, 0)
+
+#define INT_PCI_MSI_NR (2 * 32)
+
+/* Reading the PS_LINKUP */
+#define PS_LINKUP_OFFSET 0x00000238
+#define PCIE_PHY_LINKUP_BIT BIT(0)
+#define PHY_RDY_LINKUP_BIT BIT(1)
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
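+/* i.e. the PHY gets roughly one second (10 retries x ~100 ms) to come up */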
+
+struct nwl_msi { /* MSI information */
+ struct irq_domain *msi_domain;
+ DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR);
+ struct irq_domain *dev_domain;
+ struct mutex lock; /* protect bitmap variable */
+ int irq_msi0;
+ int irq_msi1;
+};
+
+struct nwl_pcie {
+ struct device *dev;
+ void __iomem *breg_base;
+ void __iomem *pcireg_base;
+ void __iomem *ecam_base;
+ phys_addr_t phys_breg_base; /* Physical Bridge Register Base */
+ phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
+ phys_addr_t phys_ecam_base; /* Physical Configuration Base */
+ u32 breg_size;
+ u32 pcie_reg_size;
+ u32 ecam_size;
+ int irq_intx;
+ int irq_misc;
+ u32 ecam_value;
+ u8 last_busno;
+ struct nwl_msi msi;
+ struct irq_domain *legacy_irq_domain;
+ struct clk *clk;
+ raw_spinlock_t leg_mask_lock;
+};
+
+static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
+{
+ return readl(pcie->breg_base + off);
+}
+
+static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off)
+{
+ writel(val, pcie->breg_base + off);
+}
+
+static bool nwl_pcie_link_up(struct nwl_pcie *pcie)
+{
+ if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT)
+ return true;
+ return false;
+}
+
+static bool nwl_phy_link_up(struct nwl_pcie *pcie)
+{
+ if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT)
+ return true;
+ return false;
+}
+
+static int nwl_wait_for_link(struct nwl_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ int retries;
+
+ /* check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (nwl_phy_link_up(pcie))
+ return 0;
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ dev_err(dev, "PHY link never came up\n");
+ return -ETIMEDOUT;
+}
+
+static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct nwl_pcie *pcie = bus->sysdata;
+
+ /* Check link before accessing downstream ports */
+ if (!pci_is_root_bus(bus)) {
+ if (!nwl_pcie_link_up(pcie))
+ return false;
+ } else if (devfn > 0) {
+ /* Only one device down on each root port */
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * nwl_pcie_map_bus - Get configuration base
+ *
+ * @bus: Bus structure of current bus
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space needed to be
+ * accessed.
+ */
+static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct nwl_pcie *pcie = bus->sysdata;
+
+ if (!nwl_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ return pcie->ecam_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+}
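+
+/*
+ * PCIE_ECAM_OFFSET() packs the address as bus << 20 | devfn << 12 |
+ * where, one 4K config window per function; e.g. bus 1, devfn 0x08,
+ * where 0x10 resolves to offset 0x108010.
+ */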
+
+/* PCIe operations */
+static struct pci_ops nwl_pcie_ops = {
+ .map_bus = nwl_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
+{
+ struct nwl_pcie *pcie = data;
+ struct device *dev = pcie->dev;
+ u32 misc_stat;
+
+ /* Checking for misc interrupts */
+ misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
+ MSGF_MISC_SR_MASKALL;
+ if (!misc_stat)
+ return IRQ_NONE;
+
+ if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
+ dev_err(dev, "Received Message FIFO Overflow\n");
+
+ if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
+ dev_err(dev, "Slave error\n");
+
+ if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
+ dev_err(dev, "Master error\n");
+
+ if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
+ dev_err(dev, "Misc ingress address translation error\n");
+
+ if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
+ dev_err(dev, "Misc egress address translation error\n");
+
+ if (misc_stat & MSGF_MISC_SR_FATAL_AER)
+ dev_err(dev, "Fatal Error in AER Capability\n");
+
+ if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
+ dev_err(dev, "Non-Fatal Error in AER Capability\n");
+
+ if (misc_stat & MSGF_MISC_SR_CORR_AER)
+ dev_err(dev, "Correctable Error in AER Capability\n");
+
+ if (misc_stat & MSGF_MISC_SR_UR_DETECT)
+ dev_err(dev, "Unsupported request Detected\n");
+
+ if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
+ dev_err(dev, "Non-Fatal Error Detected\n");
+
+ if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
+ dev_err(dev, "Fatal Error Detected\n");
+
+ if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
+ dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
+
+ if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
+ dev_info(dev, "Link Bandwidth Management Status bit set\n");
+
+ /* Clear misc interrupt status */
+ nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void nwl_pcie_leg_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct nwl_pcie *pcie;
+ unsigned long status;
+ u32 bit;
+
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+
+ while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
+ MSGF_LEG_SR_MASKALL) != 0) {
+ for_each_set_bit(bit, &status, PCI_NUM_INTX)
+ generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
+{
+ struct nwl_msi *msi = &pcie->msi;
+ unsigned long status;
+ u32 bit;
+
+ while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ nwl_bridge_writel(pcie, 1 << bit, status_reg);
+ generic_handle_domain_irq(msi->dev_domain, bit);
+ }
+ }
+}
+
+static void nwl_pcie_msi_handler_high(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+ nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI);
+ chained_irq_exit(chip, desc);
+}
+
+static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+ nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO);
+ chained_irq_exit(chip, desc);
+}
+
+static void nwl_mask_leg_irq(struct irq_data *data)
+{
+ struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask;
+ u32 val;
+
+ mask = 1 << (data->hwirq - 1);
+ raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+ val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+ nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
+ raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static void nwl_unmask_leg_irq(struct irq_data *data)
+{
+ struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask;
+ u32 val;
+
+ mask = 1 << (data->hwirq - 1);
+ raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+ val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+ nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
+ raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static struct irq_chip nwl_leg_irq_chip = {
+ .name = "nwl_pcie:legacy",
+ .irq_enable = nwl_unmask_leg_irq,
+ .irq_disable = nwl_mask_leg_irq,
+ .irq_mask = nwl_mask_leg_irq,
+ .irq_unmask = nwl_unmask_leg_irq,
+};
+
+static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops legacy_domain_ops = {
+ .map = nwl_legacy_map,
+ .xlate = pci_irqd_intx_xlate,
+};
+
+#ifdef CONFIG_PCI_MSI
+static struct irq_chip nwl_msi_irq_chip = {
+ .name = "nwl_pcie:msi",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info nwl_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &nwl_msi_irq_chip,
+};
+#endif
+
+static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t msi_addr = pcie->phys_pcie_reg_base;
+
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
+}
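+
+/*
+ * Endpoints are given the bridge's own register base as the MSI
+ * doorbell (the same address later programmed into I_MSII_BASE_LO/HI
+ * in nwl_pcie_enable_msi()); a write there latches the vector number
+ * into MSGF_MSI_STATUS_LO/HI for the chained handlers to demux.
+ */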
+
+static int nwl_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip nwl_irq_chip = {
+ .name = "Xilinx MSI",
+ .irq_compose_msi_msg = nwl_compose_msi_msg,
+ .irq_set_affinity = nwl_msi_set_affinity,
+};
+
+static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct nwl_pcie *pcie = domain->host_data;
+ struct nwl_msi *msi = &pcie->msi;
+ int bit;
+ int i;
+
+ mutex_lock(&msi->lock);
+ bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
+ get_count_order(nr_irqs));
+ if (bit < 0) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
+ mutex_unlock(&msi->lock);
+ return 0;
+}
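+
+/*
+ * PCI multi-MSI hands out power-of-two, naturally aligned vector
+ * blocks, hence get_count_order() above: a request for 3 IRQs
+ * reserves a 4-vector region in the bitmap.
+ */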
+
+static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct nwl_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+ bitmap_release_region(msi->bitmap, data->hwirq,
+ get_count_order(nr_irqs));
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops dev_msi_domain_ops = {
+ .alloc = nwl_irq_domain_alloc,
+ .free = nwl_irq_domain_free,
+};
+
+static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
+{
+#ifdef CONFIG_PCI_MSI
+ struct device *dev = pcie->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct nwl_msi *msi = &pcie->msi;
+
+ msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
+ &dev_msi_domain_ops, pcie);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create dev IRQ domain\n");
+ return -ENOMEM;
+ }
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &nwl_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create msi IRQ domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+#endif
+ return 0;
+}
+
+static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *legacy_intc_node;
+
+ legacy_intc_node = of_get_next_child(node, NULL);
+ if (!legacy_intc_node) {
+ dev_err(dev, "No legacy intc node found\n");
+ return -EINVAL;
+ }
+
+ pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
+ PCI_NUM_INTX,
+ &legacy_domain_ops,
+ pcie);
+ of_node_put(legacy_intc_node);
+ if (!pcie->legacy_irq_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ raw_spin_lock_init(&pcie->leg_mask_lock);
+ nwl_pcie_init_msi_irq_domain(pcie);
+ return 0;
+}
+
+static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nwl_msi *msi = &pcie->msi;
+ phys_addr_t base;
+ int ret;
+
+ mutex_init(&msi->lock);
+
+ /* Get msi_1 IRQ number */
+ msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
+ if (msi->irq_msi1 < 0)
+ return -EINVAL;
+
+ irq_set_chained_handler_and_data(msi->irq_msi1,
+ nwl_pcie_msi_handler_high, pcie);
+
+ /* Get msi_0 IRQ number */
+ msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
+ if (msi->irq_msi0 < 0)
+ return -EINVAL;
+
+ irq_set_chained_handler_and_data(msi->irq_msi0,
+ nwl_pcie_msi_handler_low, pcie);
+
+ /* Check for msii_present bit */
+ ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
+ if (!ret) {
+ dev_err(dev, "MSI not present\n");
+ return -EIO;
+ }
+
+ /* Enable MSII */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
+ MSII_ENABLE, I_MSII_CONTROL);
+
+ /* Enable MSII status */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
+ MSII_STATUS_ENABLE, I_MSII_CONTROL);
+
+ /* Set up the MSI capture (MSII) base address */
+ base = pcie->phys_pcie_reg_base;
+ nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO);
+ nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI);
+
+ /*
+ * For high range MSI interrupts: disable, clear any pending,
+ * and enable
+ */
+ nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_HI);
+
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) &
+ MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI);
+
+ nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);
+
+ /*
+ * For low range MSI interrupts: disable, clear any pending,
+ * and enable
+ */
+ nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_LO);
+
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) &
+ MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO);
+
+ nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);
+
+ return 0;
+}
+
+static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 breg_val, ecam_val, first_busno = 0;
+ int err;
+
+ breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
+ if (!breg_val) {
+ dev_err(dev, "BREG is not present\n");
+ return -ENODEV;
+ }
+
+ /* Write bridge_off to breg base */
+ nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base),
+ E_BREG_BASE_LO);
+ nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base),
+ E_BREG_BASE_HI);
+
+ /* Enable BREG */
+ nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE,
+ E_BREG_CONTROL);
+
+ /* Disable DMA channel registers */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) |
+ CFG_DMA_REG_BAR, BRCFG_PCIE_RX0);
+
+ /* Enable Ingress subtractive decode translation */
+ nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL);
+
+ /* Enable msg filtering details */
+ nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
+ BRCFG_PCIE_RX_MSG_FILTER);
+
+ /* This routes the PCIe DMA traffic to go through CCI path */
+ if (of_dma_is_coherent(dev->of_node))
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX1) |
+ CFG_PCIE_CACHE, BRCFG_PCIE_RX1);
+
+ err = nwl_wait_for_link(pcie);
+ if (err)
+ return err;
+
+ ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
+ if (!ecam_val) {
+ dev_err(dev, "ECAM is not present\n");
+ return -ENODEV;
+ }
+
+ /* Enable ECAM */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
+ E_ECAM_CR_ENABLE, E_ECAM_CONTROL);
+
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
+ (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
+ E_ECAM_CONTROL);
+
+ nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
+ E_ECAM_BASE_LO);
+ nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
+ E_ECAM_BASE_HI);
+
+ /* Get bus range */
+ ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
+ pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
+ /* Write primary, secondary and subordinate bus numbers */
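+ /* (bits 7:0 primary, 15:8 secondary, 23:16 subordinate) */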
+ ecam_val = first_busno;
+ ecam_val |= (first_busno + 1) << 8;
+ ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
+ writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
+
+ if (nwl_pcie_link_up(pcie))
+ dev_info(dev, "Link is UP\n");
+ else
+ dev_info(dev, "Link is DOWN\n");
+
+ /* Get misc IRQ number */
+ pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
+ if (pcie->irq_misc < 0)
+ return -EINVAL;
+
+ err = devm_request_irq(dev, pcie->irq_misc,
+ nwl_pcie_misc_handler, IRQF_SHARED,
+ "nwl_pcie:misc", pcie);
+ if (err) {
+ dev_err(dev, "fail to register misc IRQ#%d\n",
+ pcie->irq_misc);
+ return err;
+ }
+
+ /* Disable all misc interrupts */
+ nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
+
+ /* Clear pending misc interrupts */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
+ MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS);
+
+ /* Enable all misc interrupts */
+ nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
+
+ /* Disable all legacy interrupts */
+ nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
+
+ /* Clear pending legacy interrupts */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
+ MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
+
+ /* Enable all legacy interrupts */
+ nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
+
+ /* Enable the bridge config interrupt */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) |
+ BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT);
+
+ return 0;
+}
+
+static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = pcie->dev;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+ pcie->breg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->breg_base))
+ return PTR_ERR(pcie->breg_base);
+ pcie->phys_breg_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
+ pcie->pcireg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->pcireg_base))
+ return PTR_ERR(pcie->pcireg_base);
+ pcie->phys_pcie_reg_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->ecam_base))
+ return PTR_ERR(pcie->ecam_base);
+ pcie->phys_ecam_base = res->start;
+
+ /* Get intx IRQ number */
+ pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
+ if (pcie->irq_intx < 0)
+ return pcie->irq_intx;
+
+ irq_set_chained_handler_and_data(pcie->irq_intx,
+ nwl_pcie_leg_handler, pcie);
+
+ return 0;
+}
+
+static const struct of_device_id nwl_pcie_of_match[] = {
+ { .compatible = "xlnx,nwl-pcie-2.11", },
+ {}
+};
+
+static int nwl_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nwl_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ int err;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENODEV;
+
+ pcie = pci_host_bridge_priv(bridge);
+
+ pcie->dev = dev;
+ pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
+
+ err = nwl_pcie_parse_dt(pcie, pdev);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ err = clk_prepare_enable(pcie->clk);
+ if (err) {
+ dev_err(dev, "can't enable PCIe ref clock\n");
+ return err;
+ }
+
+ err = nwl_pcie_bridge_init(pcie);
+ if (err) {
+ dev_err(dev, "HW Initialization failed\n");
+ return err;
+ }
+
+ err = nwl_pcie_init_irq_domain(pcie);
+ if (err) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return err;
+ }
+
+ bridge->sysdata = pcie;
+ bridge->ops = &nwl_pcie_ops;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = nwl_pcie_enable_msi(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to enable MSI support: %d\n", err);
+ return err;
+ }
+ }
+
+ return pci_host_probe(bridge);
+}
+
+static struct platform_driver nwl_pcie_driver = {
+ .driver = {
+ .name = "nwl-pcie",
+ .suppress_bind_attrs = true,
+ .of_match_table = nwl_pcie_of_match,
+ },
+ .probe = nwl_pcie_probe,
+};
+builtin_platform_driver(nwl_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
new file mode 100644
index 000000000..cb6e9f7b0
--- /dev/null
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for Xilinx AXI PCIe Bridge
+ *
+ * Copyright (c) 2012 - 2014 Xilinx, Inc.
+ *
+ * Based on the Tegra PCIe driver
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+#include "../pci.h"
+
+/* Register definitions */
+#define XILINX_PCIE_REG_BIR 0x00000130
+#define XILINX_PCIE_REG_IDR 0x00000138
+#define XILINX_PCIE_REG_IMR 0x0000013c
+#define XILINX_PCIE_REG_PSCR 0x00000144
+#define XILINX_PCIE_REG_RPSC 0x00000148
+#define XILINX_PCIE_REG_MSIBASE1 0x0000014c
+#define XILINX_PCIE_REG_MSIBASE2 0x00000150
+#define XILINX_PCIE_REG_RPEFR 0x00000154
+#define XILINX_PCIE_REG_RPIFR1 0x00000158
+#define XILINX_PCIE_REG_RPIFR2 0x0000015c
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN BIT(0)
+#define XILINX_PCIE_INTR_ECRC_ERR BIT(1)
+#define XILINX_PCIE_INTR_STR_ERR BIT(2)
+#define XILINX_PCIE_INTR_HOT_RESET BIT(3)
+#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8)
+#define XILINX_PCIE_INTR_CORRECTABLE BIT(9)
+#define XILINX_PCIE_INTR_NONFATAL BIT(10)
+#define XILINX_PCIE_INTR_FATAL BIT(11)
+#define XILINX_PCIE_INTR_INTX BIT(16)
+#define XILINX_PCIE_INTR_MSI BIT(17)
+#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20)
+#define XILINX_PCIE_INTR_SLV_UNEXP BIT(21)
+#define XILINX_PCIE_INTR_SLV_COMPL BIT(22)
+#define XILINX_PCIE_INTR_SLV_ERRP BIT(23)
+#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24)
+#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25)
+#define XILINX_PCIE_INTR_MST_DECERR BIT(26)
+#define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
+#define XILINX_PCIE_INTR_MST_ERRP BIT(28)
+#define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED
+#define XILINX_PCIE_IMR_ENABLE_MASK 0x1FF30F0D
+#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18)
+#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Interrupt FIFO Read Register 1 definitions */
+#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31)
+#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30)
+#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27)
+#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF
+#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27
+
+/* Bridge Info Register definitions */
+#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16)
+#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16
+
+/* Root Port Interrupt FIFO Read Register 2 definitions */
+#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0)
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS 128
+
+/**
+ * struct xilinx_pcie - PCIe port information
+ * @dev: Device pointer
+ * @reg_base: IO Mapped Register Base
+ * @msi_map: Bitmap of allocated MSIs
+ * @map_lock: Mutex protecting the MSI allocation
+ * @msi_domain: MSI IRQ domain pointer
+ * @leg_domain: Legacy IRQ domain pointer
+ * @resources: Bus Resources
+ */
+struct xilinx_pcie {
+ struct device *dev;
+ void __iomem *reg_base;
+ unsigned long msi_map[BITS_TO_LONGS(XILINX_NUM_MSI_IRQS)];
+ struct mutex map_lock;
+ struct irq_domain *msi_domain;
+ struct irq_domain *leg_domain;
+ struct list_head resources;
+};
+
+static inline u32 pcie_read(struct xilinx_pcie *pcie, u32 reg)
+{
+ return readl(pcie->reg_base + reg);
+}
+
+static inline void pcie_write(struct xilinx_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->reg_base + reg);
+}
+
+static inline bool xilinx_pcie_link_up(struct xilinx_pcie *pcie)
+{
+ return pcie_read(pcie, XILINX_PCIE_REG_PSCR) &
+ XILINX_PCIE_REG_PSCR_LNKUP;
+}
+
+/**
+ * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
+ * @pcie: PCIe port information
+ */
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned long val = pcie_read(pcie, XILINX_PCIE_REG_RPEFR);
+
+ if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
+ dev_dbg(dev, "Requester ID %lu\n",
+ val & XILINX_PCIE_RPEFR_REQ_ID);
+ pcie_write(pcie, XILINX_PCIE_RPEFR_ALL_MASK,
+ XILINX_PCIE_REG_RPEFR);
+ }
+}
+
+/**
+ * xilinx_pcie_valid_device - Check if a valid device is present on bus
+ * @bus: PCI Bus structure
+ * @devfn: device/function
+ *
+ * Return: 'true' on success and 'false' if invalid device is found
+ */
+static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct xilinx_pcie *pcie = bus->sysdata;
+
+ /* Check if link is up when trying to access downstream pcie ports */
+ if (!pci_is_root_bus(bus)) {
+ if (!xilinx_pcie_link_up(pcie))
+ return false;
+ } else if (devfn > 0) {
+ /* Only one device down on each root port */
+ return false;
+ }
+ return true;
+}
+
+/**
+ * xilinx_pcie_map_bus - Get configuration base
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space needed to be
+ * accessed.
+ */
+static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct xilinx_pcie *pcie = bus->sysdata;
+
+ if (!xilinx_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ return pcie->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+}
+
+/* PCIe operations */
+static struct pci_ops xilinx_pcie_ops = {
+ .map_bus = xilinx_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/* MSI functions */
+
+static void xilinx_msi_top_irq_ack(struct irq_data *d)
+{
+ /*
+ * xilinx_pcie_intr_handler() will have performed the Ack.
+ * Eventually, this should be fixed and the Ack moved into
+ * the respective callbacks for INTx and MSI.
+ */
+}
+
+static struct irq_chip xilinx_msi_top_chip = {
+ .name = "PCIe MSI",
+ .irq_ack = xilinx_msi_top_irq_ack,
+};
+
+static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K);
+
+ msg->address_lo = lower_32_bits(pa);
+ msg->address_hi = upper_32_bits(pa);
+ msg->data = data->hwirq;
+}
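+
+/*
+ * The 4K-aligned physical page holding the driver's own private data
+ * doubles as the MSI doorbell address; the same value is programmed
+ * into XILINX_PCIE_REG_MSIBASE1/2 in xilinx_pcie_init_irq_domain(),
+ * so no dedicated page has to be allocated for message capture.
+ */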
+
+static struct irq_chip xilinx_msi_bottom_chip = {
+ .name = "Xilinx MSI",
+ .irq_set_affinity = xilinx_msi_set_affinity,
+ .irq_compose_msi_msg = xilinx_compose_msi_msg,
+};
+
+static int xilinx_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct xilinx_pcie *pcie = domain->host_data;
+ int hwirq, i;
+
+ mutex_lock(&pcie->map_lock);
+
+ hwirq = bitmap_find_free_region(pcie->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs));
+
+ mutex_unlock(&pcie->map_lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &xilinx_msi_bottom_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void xilinx_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct xilinx_pcie *pcie = domain->host_data;
+
+ mutex_lock(&pcie->map_lock);
+
+ bitmap_release_region(pcie->msi_map, d->hwirq, order_base_2(nr_irqs));
+
+ mutex_unlock(&pcie->map_lock);
+}
+
+static const struct irq_domain_ops xilinx_msi_domain_ops = {
+ .alloc = xilinx_msi_domain_alloc,
+ .free = xilinx_msi_domain_free,
+};
+
+static struct msi_domain_info xilinx_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .chip = &xilinx_msi_top_chip,
+};
+
+static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain *parent;
+
+ parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS,
+ &xilinx_msi_domain_ops, pcie);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent);
+ if (!pcie->msi_domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void xilinx_free_msi_domains(struct xilinx_pcie *pcie)
+{
+ struct irq_domain *parent = pcie->msi_domain->parent;
+
+ irq_domain_remove(pcie->msi_domain);
+ irq_domain_remove(parent);
+}
+
+/* INTx Functions */
+
+/**
+ * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_pcie_intx_map,
+ .xlate = pci_irqd_intx_xlate,
+};
+
+/* PCIe HW Functions */
+
+/**
+ * xilinx_pcie_intr_handler - Interrupt Service Handler
+ * @irq: IRQ number
+ * @data: PCIe port information
+ *
+ * Return: IRQ_HANDLED on success and IRQ_NONE on failure
+ */
+static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
+{
+ struct xilinx_pcie *pcie = (struct xilinx_pcie *)data;
+ struct device *dev = pcie->dev;
+ u32 val, mask, status;
+
+ /* Read interrupt decode and mask registers */
+ val = pcie_read(pcie, XILINX_PCIE_REG_IDR);
+ mask = pcie_read(pcie, XILINX_PCIE_REG_IMR);
+
+ status = val & mask;
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XILINX_PCIE_INTR_LINK_DOWN)
+ dev_warn(dev, "Link Down\n");
+
+ if (status & XILINX_PCIE_INTR_ECRC_ERR)
+ dev_warn(dev, "ECRC failed\n");
+
+ if (status & XILINX_PCIE_INTR_STR_ERR)
+ dev_warn(dev, "Streaming error\n");
+
+ if (status & XILINX_PCIE_INTR_HOT_RESET)
+ dev_info(dev, "Hot reset\n");
+
+ if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
+ dev_warn(dev, "ECAM access timeout\n");
+
+ if (status & XILINX_PCIE_INTR_CORRECTABLE) {
+ dev_warn(dev, "Correctable error message\n");
+ xilinx_pcie_clear_err_interrupts(pcie);
+ }
+
+ if (status & XILINX_PCIE_INTR_NONFATAL) {
+ dev_warn(dev, "Non fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(pcie);
+ }
+
+ if (status & XILINX_PCIE_INTR_FATAL) {
+ dev_warn(dev, "Fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(pcie);
+ }
+
+ if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
+ struct irq_domain *domain;
+
+ val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR1);
+
+ /* Check whether the interrupt is valid */
+ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+ dev_warn(dev, "RP Intr FIFO1 read error\n");
+ goto error;
+ }
+
+ /* Decode the IRQ number */
+ if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
+ val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) &
+ XILINX_PCIE_RPIFR2_MSG_DATA;
+ domain = pcie->msi_domain->parent;
+ } else {
+ val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
+ XILINX_PCIE_RPIFR1_INTR_SHIFT;
+ domain = pcie->leg_domain;
+ }
+
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(pcie, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
+
+ generic_handle_domain_irq(domain, val);
+ }
+
+ if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
+ dev_warn(dev, "Slave unsupported request\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_UNEXP)
+ dev_warn(dev, "Slave unexpected completion\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_COMPL)
+ dev_warn(dev, "Slave completion timeout\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ERRP)
+ dev_warn(dev, "Slave Error Poison\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_CMPABT)
+ dev_warn(dev, "Slave Completer Abort\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
+ dev_warn(dev, "Slave Illegal Burst\n");
+
+ if (status & XILINX_PCIE_INTR_MST_DECERR)
+ dev_warn(dev, "Master decode error\n");
+
+ if (status & XILINX_PCIE_INTR_MST_SLVERR)
+ dev_warn(dev, "Master slave error\n");
+
+ if (status & XILINX_PCIE_INTR_MST_ERRP)
+ dev_warn(dev, "Master error poison\n");
+
+error:
+ /* Clear the Interrupt Decode register */
+ pcie_write(pcie, status, XILINX_PCIE_REG_IDR);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_pcie_init_irq_domain - Initialize IRQ domain
+ * @pcie: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *pcie_intc_node;
+ int ret;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(dev->of_node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops,
+ pcie);
+ of_node_put(pcie_intc_node);
+ if (!pcie->leg_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ /* Setup MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K);
+
+ ret = xilinx_allocate_msi_domains(pcie);
+ if (ret)
+ return ret;
+
+ pcie_write(pcie, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1);
+ pcie_write(pcie, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2);
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_init_port - Initialize hardware
+ * @pcie: PCIe port information
+ */
+static void xilinx_pcie_init_port(struct xilinx_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+
+ if (xilinx_pcie_link_up(pcie))
+ dev_info(dev, "PCIe Link is UP\n");
+ else
+ dev_info(dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(pcie, ~XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(pcie, pcie_read(pcie, XILINX_PCIE_REG_IDR) &
+ XILINX_PCIE_IMR_ALL_MASK,
+ XILINX_PCIE_REG_IDR);
+
+ /* Enable all interrupts we handle */
+ pcie_write(pcie, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR);
+
+ /* Enable the Bridge enable bit */
+ pcie_write(pcie, pcie_read(pcie, XILINX_PCIE_REG_RPSC) |
+ XILINX_PCIE_REG_RPSC_BEN,
+ XILINX_PCIE_REG_RPSC);
+}
+
+/**
+ * xilinx_pcie_parse_dt - Parse Device tree
+ * @pcie: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_dt(struct xilinx_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node;
+ struct resource regs;
+ unsigned int irq;
+ int err;
+
+ err = of_address_to_resource(node, 0, &regs);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ pcie->reg_base = devm_pci_remap_cfg_resource(dev, &regs);
+ if (IS_ERR(pcie->reg_base))
+ return PTR_ERR(pcie->reg_base);
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_err(dev, "Unable to parse interrupt\n");
+ return -EINVAL;
+ }
+
+ err = devm_request_irq(dev, irq, xilinx_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", pcie);
+ if (err) {
+ dev_err(dev, "unable to request irq %d\n", irq);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct xilinx_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ int err;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENODEV;
+
+ pcie = pci_host_bridge_priv(bridge);
+ mutex_init(&pcie->map_lock);
+ pcie->dev = dev;
+
+ err = xilinx_pcie_parse_dt(pcie);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ xilinx_pcie_init_port(pcie);
+
+ err = xilinx_pcie_init_irq_domain(pcie);
+ if (err) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return err;
+ }
+
+ bridge->sysdata = pcie;
+ bridge->ops = &xilinx_pcie_ops;
+
+ err = pci_host_probe(bridge);
+ if (err)
+ xilinx_free_msi_domains(pcie);
+
+ return err;
+}
+
+static const struct of_device_id xilinx_pcie_of_match[] = {
+ { .compatible = "xlnx,axi-pcie-host-1.00.a", },
+ {}
+};
+
+static struct platform_driver xilinx_pcie_driver = {
+ .driver = {
+ .name = "xilinx-pcie",
+ .of_match_table = xilinx_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_pcie_probe,
+};
+builtin_platform_driver(xilinx_pcie_driver);
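
Both Xilinx MSI allocators above carve naturally aligned, power-of-two
regions out of a bitmap (bitmap_find_free_region() driven by
order_base_2() or get_count_order()), as PCI multi-MSI requires. The
self-contained userspace approximation below mimics that policy;
demo_alloc_region and demo_free_region are hypothetical stand-ins for
the kernel helpers, scaled down to a 32-vector table.

#include <stdio.h>

#define NVEC 32	/* toy table size; the driver uses 128 */

static unsigned int bitmap;	/* bit i set => vector i in use */

/* Reserve 2^order naturally aligned vectors; returns the first one. */
static int demo_alloc_region(int order)
{
	unsigned int size = 1U << order;
	unsigned int mask = (size == 32) ? ~0U : (1U << size) - 1;
	unsigned int pos;

	for (pos = 0; pos + size <= NVEC; pos += size)	/* aligned steps */
		if (!(bitmap & (mask << pos))) {
			bitmap |= mask << pos;
			return pos;
		}
	return -1;
}

static void demo_free_region(int pos, int order)
{
	unsigned int size = 1U << order;
	unsigned int mask = (size == 32) ? ~0U : (1U << size) - 1;

	bitmap &= ~(mask << pos);
}

int main(void)
{
	int a = demo_alloc_region(0);	/* 1 vector  -> slot 0 */
	int b = demo_alloc_region(2);	/* 4 vectors -> slot 4, aligned */
	int c = demo_alloc_region(1);	/* 2 vectors -> slot 2 */

	printf("a=%d b=%d c=%d\n", a, b, c);	/* prints a=0 b=4 c=2 */
	demo_free_region(b, 2);
	return 0;
}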
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
new file mode 100644
index 000000000..6ac0afae0
--- /dev/null
+++ b/drivers/pci/controller/vmd.c
@@ -0,0 +1,1135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Volume Management Device driver
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/srcu.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+
+#include <asm/irqdomain.h>
+
+#define VMD_CFGBAR 0
+#define VMD_MEMBAR1 2
+#define VMD_MEMBAR2 4
+
+#define PCI_REG_VMCAP 0x40
+#define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1)
+#define PCI_REG_VMCONFIG 0x44
+#define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3)
+#define VMCONFIG_MSI_REMAP 0x2
+#define PCI_REG_VMLOCK 0x70
+#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
+
+#define MB2_SHADOW_OFFSET 0x2000
+#define MB2_SHADOW_SIZE 16
+
+enum vmd_features {
+ /*
+ * Device may contain registers which hint at the physical location of
+ * the membars, in order to allow proper address translation during
+ * resource assignment to enable guest virtualization.
+ */
+ VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0),
+
+ /*
+ * Device may provide root port configuration information which limits
+ * bus numbering
+ */
+ VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1),
+
+ /*
+ * Device contains physical location shadow registers in
+ * vendor-specific capability space
+ */
+ VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP = (1 << 2),
+
+ /*
+ * Device may use MSI-X vector 0 for software triggering; that vector
+ * will not be used for MSI remapping.
+ */
+ VMD_FEAT_OFFSET_FIRST_VECTOR = (1 << 3),
+
+ /*
+ * Device can bypass remapping MSI-X transactions into its MSI-X table,
+ * avoiding the requirement of a VMD MSI domain for child device
+ * interrupt handling.
+ */
+ VMD_FEAT_CAN_BYPASS_MSI_REMAP = (1 << 4),
+
+ /*
+ * Enable ASPM on the PCIe root ports and set the default LTR of the
+ * storage devices on platforms where these values are not configured by
+ * BIOS. This is needed for laptops, which require these settings for
+ * proper power management of the SoC.
+ */
+ VMD_FEAT_BIOS_PM_QUIRK = (1 << 5),
+};
+
+#define VMD_BIOS_PM_QUIRK_LTR 0x1003 /* 3145728 ns */
+
+#define VMD_FEATS_CLIENT (VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | \
+ VMD_FEAT_HAS_BUS_RESTRICTIONS | \
+ VMD_FEAT_OFFSET_FIRST_VECTOR | \
+ VMD_FEAT_BIOS_PM_QUIRK)
+
+static DEFINE_IDA(vmd_instance_ida);
+
+/*
+ * Lock for manipulating VMD IRQ lists.
+ */
+static DEFINE_RAW_SPINLOCK(list_lock);
+
+/**
+ * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
+ * @node: list item for parent traversal.
+ * @irq: back pointer to the parent vmd_irq_list.
+ * @enabled: true if the driver has enabled this IRQ
+ * @virq: the virtual IRQ value provided to the requesting driver.
+ *
+ * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
+ * a VMD IRQ using this structure.
+ */
+struct vmd_irq {
+ struct list_head node;
+ struct vmd_irq_list *irq;
+ bool enabled;
+ unsigned int virq;
+};
+
+/**
+ * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
+ * @irq_list: the list of IRQs the VMD vector demuxes to.
+ * @srcu: SRCU struct for local synchronization.
+ * @count: number of child IRQs assigned to this vector; used to track
+ * sharing.
+ * @virq: The underlying VMD Linux interrupt number
+ */
+struct vmd_irq_list {
+ struct list_head irq_list;
+ struct srcu_struct srcu;
+ unsigned int count;
+ unsigned int virq;
+};
+
+struct vmd_dev {
+ struct pci_dev *dev;
+
+ spinlock_t cfg_lock;
+ void __iomem *cfgbar;
+
+ int msix_count;
+ struct vmd_irq_list *irqs;
+
+ struct pci_sysdata sysdata;
+ struct resource resources[3];
+ struct irq_domain *irq_domain;
+ struct pci_bus *bus;
+ u8 busn_start;
+ u8 first_vec;
+ char *name;
+ int instance;
+};
+
+static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
+{
+ return container_of(bus->sysdata, struct vmd_dev, sysdata);
+}
+
+static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
+ struct vmd_irq_list *irqs)
+{
+ return irqs - vmd->irqs;
+}
+
+/*
+ * Drivers managing a device in a VMD domain allocate their own IRQs as before,
+ * but the MSI entry for the hardware it's driving will be programmed with a
+ * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its
+ * domain into one of its own, and the VMD driver de-muxes these for the
+ * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations
+ * and irq_chip to set this up.
+ */
+static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct vmd_irq *vmdirq = data->chip_data;
+ struct vmd_irq_list *irq = vmdirq->irq;
+ struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);
+
+ memset(msg, 0, sizeof(*msg));
+ msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
+ msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
+ msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
+}
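+
+/*
+ * Illustrative example (hypothetical device): an NVMe drive behind VMD
+ * that is handed VMD vector 3 gets an MSI message whose destid_0_7 is 3.
+ * Raising it triggers VMD's own MSI-X vector 3, and vmd_irq() then walks
+ * that vector's irq_list to invoke the drive's real handler via its virq.
+ */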
+
+/*
+ * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
+ */
+static void vmd_irq_enable(struct irq_data *data)
+{
+ struct vmd_irq *vmdirq = data->chip_data;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&list_lock, flags);
+ WARN_ON(vmdirq->enabled);
+ list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+ vmdirq->enabled = true;
+ raw_spin_unlock_irqrestore(&list_lock, flags);
+
+ data->chip->irq_unmask(data);
+}
+
+static void vmd_irq_disable(struct irq_data *data)
+{
+ struct vmd_irq *vmdirq = data->chip_data;
+ unsigned long flags;
+
+ data->chip->irq_mask(data);
+
+ raw_spin_lock_irqsave(&list_lock, flags);
+ if (vmdirq->enabled) {
+ list_del_rcu(&vmdirq->node);
+ vmdirq->enabled = false;
+ }
+ raw_spin_unlock_irqrestore(&list_lock, flags);
+}
+
+/*
+ * XXX: Stubbed until we develop an acceptable way to avoid creating
+ * conflicts with other devices sharing the same vector.
+ */
+static int vmd_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip vmd_msi_controller = {
+ .name = "VMD-MSI",
+ .irq_enable = vmd_irq_enable,
+ .irq_disable = vmd_irq_disable,
+ .irq_compose_msi_msg = vmd_compose_msi_msg,
+ .irq_set_affinity = vmd_irq_set_affinity,
+};
+
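+/*
+ * All child MSIs report hwirq 0 (see vmd_get_hwirq() below); the real
+ * routing lives in the per-descriptor chip_data (struct vmd_irq) set up
+ * in vmd_msi_init().
+ */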
+static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
+ msi_alloc_info_t *arg)
+{
+ return 0;
+}
+
+/*
+ * XXX: We can be even smarter selecting the best IRQ once we solve the
+ * affinity problem.
+ */
+static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
+{
+ unsigned long flags;
+ int i, best;
+
+ if (vmd->msix_count == 1 + vmd->first_vec)
+ return &vmd->irqs[vmd->first_vec];
+
+ /*
+ * Allowlist for fast-interrupt handlers. All others will share the
+ * "slow" interrupt vector.
+ */
+ switch (msi_desc_to_pci_dev(desc)->class) {
+ case PCI_CLASS_STORAGE_EXPRESS:
+ break;
+ default:
+ return &vmd->irqs[vmd->first_vec];
+ }
+
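+ /* Pick the least-subscribed vector; counts are updated under list_lock */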
+ raw_spin_lock_irqsave(&list_lock, flags);
+ best = vmd->first_vec + 1;
+ for (i = best; i < vmd->msix_count; i++)
+ if (vmd->irqs[i].count < vmd->irqs[best].count)
+ best = i;
+ vmd->irqs[best].count++;
+ raw_spin_unlock_irqrestore(&list_lock, flags);
+
+ return &vmd->irqs[best];
+}
+
+static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq,
+ msi_alloc_info_t *arg)
+{
+ struct msi_desc *desc = arg->desc;
+ struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
+ struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+
+ if (!vmdirq)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vmdirq->node);
+ vmdirq->irq = vmd_next_irq(vmd, desc);
+ vmdirq->virq = virq;
+
+ irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
+ handle_untracked_irq, vmd, NULL);
+ return 0;
+}
+
+static void vmd_msi_free(struct irq_domain *domain,
+ struct msi_domain_info *info, unsigned int virq)
+{
+ struct vmd_irq *vmdirq = irq_get_chip_data(virq);
+ unsigned long flags;
+
+ synchronize_srcu(&vmdirq->irq->srcu);
+
+ /* XXX: Potential optimization to rebalance */
+ raw_spin_lock_irqsave(&list_lock, flags);
+ vmdirq->irq->count--;
+ raw_spin_unlock_irqrestore(&list_lock, flags);
+
+ kfree(vmdirq);
+}
+
+static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *arg)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+
+ if (nvec > vmd->msix_count)
+ return vmd->msix_count;
+
+ memset(arg, 0, sizeof(*arg));
+ return 0;
+}
+
+static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+}
+
+static struct msi_domain_ops vmd_msi_domain_ops = {
+ .get_hwirq = vmd_get_hwirq,
+ .msi_init = vmd_msi_init,
+ .msi_free = vmd_msi_free,
+ .msi_prepare = vmd_msi_prepare,
+ .set_desc = vmd_set_desc,
+};
+
+static struct msi_domain_info vmd_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX,
+ .ops = &vmd_msi_domain_ops,
+ .chip = &vmd_msi_controller,
+};
+
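+/*
+ * Note the inverted sense: setting VMCONFIG_MSI_REMAP in VMCONFIG
+ * *disables* MSI-X remapping, so enabling remapping clears the bit.
+ */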
+static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
+{
+ u16 reg;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
+ reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
+ (reg | VMCONFIG_MSI_REMAP);
+ pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
+}
+
+static int vmd_create_irq_domain(struct vmd_dev *vmd)
+{
+ struct fwnode_handle *fn;
+
+ fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
+ if (!fn)
+ return -ENODEV;
+
+ vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
+ if (!vmd->irq_domain) {
+ irq_domain_free_fwnode(fn);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void vmd_remove_irq_domain(struct vmd_dev *vmd)
+{
+ /*
+ * Some production BIOSes won't enable remapping again after a soft reboot.
+ * Ensure remapping is restored before unloading the driver.
+ */
+ if (!vmd->msix_count)
+ vmd_set_msi_remapping(vmd, true);
+
+ if (vmd->irq_domain) {
+ struct fwnode_handle *fn = vmd->irq_domain->fwnode;
+
+ irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
+ }
+}
+
+static void __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
+ unsigned int devfn, int reg, int len)
+{
+ unsigned int busnr_ecam = bus->number - vmd->busn_start;
+ u32 offset = PCIE_ECAM_OFFSET(busnr_ecam, devfn, reg);
+
+ if (offset + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR]))
+ return NULL;
+
+ return vmd->cfgbar + offset;
+}
+
+/*
+ * CPU may deadlock if config space is not serialized on some versions of this
+ * hardware, so all config space access is done under a spinlock.
+ */
+static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
+ int len, u32 *value)
+{
+ struct vmd_dev *vmd = vmd_from_bus(bus);
+ void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!addr)
+ return -EFAULT;
+
+ spin_lock_irqsave(&vmd->cfg_lock, flags);
+ switch (len) {
+ case 1:
+ *value = readb(addr);
+ break;
+ case 2:
+ *value = readw(addr);
+ break;
+ case 4:
+ *value = readl(addr);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ return ret;
+}
+
+/*
+ * VMD h/w converts non-posted config writes to posted memory writes. The
+ * read-back in this function forces the completion so it returns only after
+ * the config space was written, as expected.
+ */
+static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
+ int len, u32 value)
+{
+ struct vmd_dev *vmd = vmd_from_bus(bus);
+ void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!addr)
+ return -EFAULT;
+
+ spin_lock_irqsave(&vmd->cfg_lock, flags);
+ switch (len) {
+ case 1:
+ writeb(value, addr);
+ readb(addr);
+ break;
+ case 2:
+ writew(value, addr);
+ readw(addr);
+ break;
+ case 4:
+ writel(value, addr);
+ readl(addr);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ return ret;
+}
+
+static struct pci_ops vmd_ops = {
+ .read = vmd_pci_read,
+ .write = vmd_pci_write,
+};
+
+#ifdef CONFIG_ACPI
+static struct acpi_device *vmd_acpi_find_companion(struct pci_dev *pci_dev)
+{
+ struct pci_host_bridge *bridge;
+ u32 busnr, addr;
+
+ if (pci_dev->bus->ops != &vmd_ops)
+ return NULL;
+
+ bridge = pci_find_host_bridge(pci_dev->bus);
+ busnr = pci_dev->bus->number - bridge->bus->number;
+ /*
+ * The address computation below is only applicable to relative bus
+ * numbers below 32.
+ */
+ if (busnr > 31)
+ return NULL;
+
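+ /*
+ * Build the key matched against the companion's _ADR values: bit 31
+ * set, the relative bus number in bits 28:24, devfn in bits 23:16,
+ * and the low 16 bits all-ones.
+ */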
+ addr = (busnr << 24) | ((u32)pci_dev->devfn << 16) | 0x8000FFFFU;
+
+ dev_dbg(&pci_dev->dev, "Looking for ACPI companion (address 0x%x)\n",
+ addr);
+
+ return acpi_find_child_device(ACPI_COMPANION(bridge->dev.parent), addr,
+ false);
+}
+
+static bool hook_installed;
+
+static void vmd_acpi_begin(void)
+{
+ if (pci_acpi_set_companion_lookup_hook(vmd_acpi_find_companion))
+ return;
+
+ hook_installed = true;
+}
+
+static void vmd_acpi_end(void)
+{
+ if (!hook_installed)
+ return;
+
+ pci_acpi_clear_companion_lookup_hook();
+ hook_installed = false;
+}
+#else
+static inline void vmd_acpi_begin(void) { }
+static inline void vmd_acpi_end(void) { }
+#endif /* CONFIG_ACPI */
+
+static void vmd_domain_reset(struct vmd_dev *vmd)
+{
+ u16 bus, max_buses = resource_size(&vmd->resources[0]);
+ u8 dev, functions, fn, hdr_type;
+ char __iomem *base;
+
+ for (bus = 0; bus < max_buses; bus++) {
+ for (dev = 0; dev < 32; dev++) {
+ base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ PCI_DEVFN(dev, 0), 0);
+
+ hdr_type = readb(base + PCI_HEADER_TYPE);
+
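+ /* Bit 7 of the header type flags a multi-function device */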
+ functions = (hdr_type & 0x80) ? 8 : 1;
+ for (fn = 0; fn < functions; fn++) {
+ base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ PCI_DEVFN(dev, fn), 0);
+
+ hdr_type = readb(base + PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
+
+ if (hdr_type != PCI_HEADER_TYPE_BRIDGE ||
+ (readw(base + PCI_CLASS_DEVICE) !=
+ PCI_CLASS_BRIDGE_PCI))
+ continue;
+
+ /*
+ * Temporarily disable the I/O range before updating
+ * PCI_IO_BASE.
+ */
+ writel(0x0000ffff, base + PCI_IO_BASE_UPPER16);
+ /* Update lower 16 bits of I/O base/limit */
+ writew(0x00f0, base + PCI_IO_BASE);
+ /* Update upper 16 bits of I/O base/limit */
+ writel(0, base + PCI_IO_BASE_UPPER16);
+
+ /* MMIO Base/Limit */
+ writel(0x0000fff0, base + PCI_MEMORY_BASE);
+
+ /* Prefetchable MMIO Base/Limit */
+ writel(0, base + PCI_PREF_LIMIT_UPPER32);
+ writel(0x0000fff0, base + PCI_PREF_MEMORY_BASE);
+ writel(0xffffffff, base + PCI_PREF_BASE_UPPER32);
+ }
+ }
+ }
+}
+
+static void vmd_attach_resources(struct vmd_dev *vmd)
+{
+ vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
+ vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
+}
+
+static void vmd_detach_resources(struct vmd_dev *vmd)
+{
+ vmd->dev->resource[VMD_MEMBAR1].child = NULL;
+ vmd->dev->resource[VMD_MEMBAR2].child = NULL;
+}
+
+/*
+ * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
+ * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
+ * 16 bits are the PCI Segment Group (domain) number. Other bits are
+ * currently reserved.
+ */
+static int vmd_find_free_domain(void)
+{
+ int domain = 0xffff;
+ struct pci_bus *bus = NULL;
+
+ while ((bus = pci_find_next_bus(bus)) != NULL)
+ domain = max_t(int, domain, pci_domain_nr(bus));
+ return domain + 1;
+}
+
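+/*
+ * If no shadow information is present the offsets are left untouched, so
+ * the caller's zero-initialized offsets give an identity (guest == host)
+ * translation.
+ */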
+static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
+ resource_size_t *offset1,
+ resource_size_t *offset2)
+{
+ struct pci_dev *dev = vmd->dev;
+ u64 phys1, phys2;
+
+ if (native_hint) {
+ u32 vmlock;
+ int ret;
+
+ ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
+ if (ret || PCI_POSSIBLE_ERROR(vmlock))
+ return -ENODEV;
+
+ if (MB2_SHADOW_EN(vmlock)) {
+ void __iomem *membar2;
+
+ membar2 = pci_iomap(dev, VMD_MEMBAR2, 0);
+ if (!membar2)
+ return -ENOMEM;
+ phys1 = readq(membar2 + MB2_SHADOW_OFFSET);
+ phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8);
+ pci_iounmap(dev, membar2);
+ } else
+ return 0;
+ } else {
+ /* Hypervisor-Emulated Vendor-Specific Capability */
+ int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+ u32 reg, regu;
+
+ pci_read_config_dword(dev, pos + 4, &reg);
+
+ /* "SHDW" */
+ if (pos && reg == 0x53484457) {
+ pci_read_config_dword(dev, pos + 8, &reg);
+ pci_read_config_dword(dev, pos + 12, &regu);
+ phys1 = (u64) regu << 32 | reg;
+
+ pci_read_config_dword(dev, pos + 16, &reg);
+ pci_read_config_dword(dev, pos + 20, &regu);
+ phys2 = (u64) regu << 32 | reg;
+ } else
+ return 0;
+ }
+
+ *offset1 = dev->resource[VMD_MEMBAR1].start -
+ (phys1 & PCI_BASE_ADDRESS_MEM_MASK);
+ *offset2 = dev->resource[VMD_MEMBAR2].start -
+ (phys2 & PCI_BASE_ADDRESS_MEM_MASK);
+
+ return 0;
+}
+
+static int vmd_get_bus_number_start(struct vmd_dev *vmd)
+{
+ struct pci_dev *dev = vmd->dev;
+ u16 reg;
+
+ pci_read_config_word(dev, PCI_REG_VMCAP, &reg);
+ if (BUS_RESTRICT_CAP(reg)) {
+ pci_read_config_word(dev, PCI_REG_VMCONFIG, &reg);
+
+ switch (BUS_RESTRICT_CFG(reg)) {
+ case 0:
+ vmd->busn_start = 0;
+ break;
+ case 1:
+ vmd->busn_start = 128;
+ break;
+ case 2:
+ vmd->busn_start = 224;
+ break;
+ default:
+ pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
+ BUS_RESTRICT_CFG(reg));
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
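+/*
+ * Demux one VMD vector: walk its irq_list under SRCU so vmd_msi_free()
+ * can synchronize_srcu() before freeing an entry.
+ */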
+static irqreturn_t vmd_irq(int irq, void *data)
+{
+ struct vmd_irq_list *irqs = data;
+ struct vmd_irq *vmdirq;
+ int idx;
+
+ idx = srcu_read_lock(&irqs->srcu);
+ list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
+ generic_handle_irq(vmdirq->virq);
+ srcu_read_unlock(&irqs->srcu, idx);
+
+ return IRQ_HANDLED;
+}
+
+static int vmd_alloc_irqs(struct vmd_dev *vmd)
+{
+ struct pci_dev *dev = vmd->dev;
+ int i, err;
+
+ vmd->msix_count = pci_msix_vec_count(dev);
+ if (vmd->msix_count < 0)
+ return -ENODEV;
+
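+ /*
+ * Request at least first_vec + 1 vectors so one usable vector remains
+ * beyond the slot reserved by VMD_FEAT_OFFSET_FIRST_VECTOR.
+ */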
+ vmd->msix_count = pci_alloc_irq_vectors(dev, vmd->first_vec + 1,
+ vmd->msix_count, PCI_IRQ_MSIX);
+ if (vmd->msix_count < 0)
+ return vmd->msix_count;
+
+ vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
+ GFP_KERNEL);
+ if (!vmd->irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < vmd->msix_count; i++) {
+ err = init_srcu_struct(&vmd->irqs[i].srcu);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
+ vmd->irqs[i].virq = pci_irq_vector(dev, i);
+ err = devm_request_irq(&dev->dev, vmd->irqs[i].virq,
+ vmd_irq, IRQF_NO_THREAD,
+ vmd->name, &vmd->irqs[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * Since VMD is an aperture to regular PCIe root ports, only allow it to
+ * control features that the OS is allowed to control on the physical PCI bus.
+ */
+static void vmd_copy_host_bridge_flags(struct pci_host_bridge *root_bridge,
+ struct pci_host_bridge *vmd_bridge)
+{
+ vmd_bridge->native_pcie_hotplug = root_bridge->native_pcie_hotplug;
+ vmd_bridge->native_shpc_hotplug = root_bridge->native_shpc_hotplug;
+ vmd_bridge->native_aer = root_bridge->native_aer;
+ vmd_bridge->native_pme = root_bridge->native_pme;
+ vmd_bridge->native_ltr = root_bridge->native_ltr;
+ vmd_bridge->native_dpc = root_bridge->native_dpc;
+}
+
+/*
+ * Enable ASPM and LTR settings on devices that aren't configured by BIOS.
+ */
+static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
+{
+ unsigned long features = *(unsigned long *)userdata;
+ u16 ltr = VMD_BIOS_PM_QUIRK_LTR;
+ u32 ltr_reg;
+ int pos;
+
+ if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
+ return 0;
+
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
+ if (!pos)
+ return 0;
+
+ /*
+ * Skip if the max snoop LTR is non-zero, indicating BIOS has set it
+ * so the LTR quirk is not needed.
+ */
+ pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg);
+ if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK)))
+ return 0;
+
+ /*
+ * Set the default values to the maximum required by the platform to
+ * allow the deepest power management savings. Write as a DWORD where
+ * the lower word is the max snoop latency and the upper word is the
+ * max non-snoop latency.
+ */
+ ltr_reg = (ltr << 16) | ltr;
+ pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg);
+ pci_info(pdev, "VMD: Default LTR value set by driver\n");
+
+ return 0;
+}
+
+static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+{
+ struct pci_sysdata *sd = &vmd->sysdata;
+ struct resource *res;
+ u32 upper_bits;
+ unsigned long flags;
+ LIST_HEAD(resources);
+ resource_size_t offset[2] = {0};
+ resource_size_t membar2_offset = 0x2000;
+ struct pci_bus *child;
+ struct pci_dev *dev;
+ int ret;
+
+ /*
+ * Shadow registers may exist in certain VMD device IDs which allow
+ * guests to correctly assign host physical addresses to the root ports
+ * and child devices. These registers will either return the host value
+ * or 0, depending on an enable bit in the VMD device.
+ */
+ if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
+ membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
+ ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]);
+ if (ret)
+ return ret;
+ } else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
+ ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Certain VMD devices may have a root port configuration option which
+ * limits the usable bus numbers to 0-127, 128-255, or 224-255.
+ */
+ if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
+ ret = vmd_get_bus_number_start(vmd);
+ if (ret)
+ return ret;
+ }
+
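+ /*
+ * ECAM assigns each bus number 1MB (1 << 20) of config space, so the
+ * CFGBAR size bounds the bus range this domain can expose.
+ */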
+ res = &vmd->dev->resource[VMD_CFGBAR];
+ vmd->resources[0] = (struct resource) {
+ .name = "VMD CFGBAR",
+ .start = vmd->busn_start,
+ .end = vmd->busn_start + (resource_size(res) >> 20) - 1,
+ .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
+ };
+
+ /*
+ * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
+ * put 32-bit resources in the window.
+ *
+ * There's no hardware reason why a 64-bit window *couldn't*
+ * contain a 32-bit resource, but pbus_size_mem() computes the
+ * bridge window size assuming a 64-bit window will contain no
+ * 32-bit resources. __pci_assign_resource() enforces that
+ * artificial restriction to make sure everything will fit.
+ *
+ * The only way we could use a 64-bit non-prefetchable MEMBAR is
+ * if its address is <4GB so that we can convert it to a 32-bit
+ * resource. To be visible to the host OS, all VMD endpoints must
+ * be initially configured by platform BIOS, which includes setting
+ * up these resources. We can assume the device is configured
+ * according to the platform needs.
+ */
+ res = &vmd->dev->resource[VMD_MEMBAR1];
+ upper_bits = upper_32_bits(res->end);
+ flags = res->flags & ~IORESOURCE_SIZEALIGN;
+ if (!upper_bits)
+ flags &= ~IORESOURCE_MEM_64;
+ vmd->resources[1] = (struct resource) {
+ .name = "VMD MEMBAR1",
+ .start = res->start,
+ .end = res->end,
+ .flags = flags,
+ .parent = res,
+ };
+
+ res = &vmd->dev->resource[VMD_MEMBAR2];
+ upper_bits = upper_32_bits(res->end);
+ flags = res->flags & ~IORESOURCE_SIZEALIGN;
+ if (!upper_bits)
+ flags &= ~IORESOURCE_MEM_64;
+ vmd->resources[2] = (struct resource) {
+ .name = "VMD MEMBAR2",
+ .start = res->start + membar2_offset,
+ .end = res->end,
+ .flags = flags,
+ .parent = res,
+ };
+
+ sd->vmd_dev = vmd->dev;
+ sd->domain = vmd_find_free_domain();
+ if (sd->domain < 0)
+ return sd->domain;
+
+ sd->node = pcibus_to_node(vmd->dev->bus);
+
+ /*
+ * Currently MSI remapping must be enabled in guest passthrough mode
+ * due to some missing interrupt remapping plumbing. This is probably
+ * acceptable because the guest is usually CPU-limited and MSI
+ * remapping doesn't become a performance bottleneck.
+ */
+ if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
+ offset[0] || offset[1]) {
+ ret = vmd_alloc_irqs(vmd);
+ if (ret)
+ return ret;
+
+ vmd_set_msi_remapping(vmd, true);
+
+ ret = vmd_create_irq_domain(vmd);
+ if (ret)
+ return ret;
+
+ /*
+ * Override the IRQ domain bus token so the domain can be
+ * distinguished from a regular PCI/MSI domain.
+ */
+ irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
+ } else {
+ vmd_set_msi_remapping(vmd, false);
+ }
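+
+ /*
+ * In bypass mode no VMD IRQ domain is created; the child bus inherits
+ * the parent device's MSI domain below, and child MSIs are delivered
+ * without being remapped through VMD's MSI-X table.
+ */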
+
+ pci_add_resource(&resources, &vmd->resources[0]);
+ pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
+ pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
+
+ vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
+ &vmd_ops, sd, &resources);
+ if (!vmd->bus) {
+ pci_free_resource_list(&resources);
+ vmd_remove_irq_domain(vmd);
+ return -ENODEV;
+ }
+
+ vmd_copy_host_bridge_flags(pci_find_host_bridge(vmd->dev->bus),
+ to_pci_host_bridge(vmd->bus->bridge));
+
+ vmd_attach_resources(vmd);
+ if (vmd->irq_domain)
+ dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+ else
+ dev_set_msi_domain(&vmd->bus->dev,
+ dev_get_msi_domain(&vmd->dev->dev));
+
+ vmd_acpi_begin();
+
+ pci_scan_child_bus(vmd->bus);
+ vmd_domain_reset(vmd);
+
+ /*
+ * When Intel VMD is enabled, the OS does not discover the Root Ports
+ * owned by Intel VMD within the MMCFG space. pci_reset_bus() applies
+ * a reset to the parent of the PCI device supplied as argument. This
+ * is why we pass a child device, so the reset can be triggered at
+ * the Intel bridge level and propagated to all the children in the
+ * hierarchy.
+ */
+ list_for_each_entry(child, &vmd->bus->children, node) {
+ if (!list_empty(&child->devices)) {
+ dev = list_first_entry(&child->devices,
+ struct pci_dev, bus_list);
+ ret = pci_reset_bus(dev);
+ if (ret)
+ pci_warn(dev, "can't reset device: %d\n", ret);
+
+ break;
+ }
+ }
+
+ pci_assign_unassigned_bus_resources(vmd->bus);
+
+ pci_walk_bus(vmd->bus, vmd_pm_enable_quirk, &features);
+
+ /*
+ * VMD root buses are virtual and don't return true on pci_is_pcie()
+ * and will fail pcie_bus_configure_settings() early. It can instead be
+ * run on each of the real root ports.
+ */
+ list_for_each_entry(child, &vmd->bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(vmd->bus);
+
+ vmd_acpi_end();
+
+ WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
+ "domain"), "Can't create symlink to domain\n");
+ return 0;
+}
+
+static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ unsigned long features = (unsigned long) id->driver_data;
+ struct vmd_dev *vmd;
+ int err;
+
+ if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
+ return -ENOMEM;
+
+ vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
+ if (!vmd)
+ return -ENOMEM;
+
+ vmd->dev = dev;
+ vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL);
+ if (vmd->instance < 0)
+ return vmd->instance;
+
+ vmd->name = devm_kasprintf(&dev->dev, GFP_KERNEL, "vmd%d",
+ vmd->instance);
+ if (!vmd->name) {
+ err = -ENOMEM;
+ goto out_release_instance;
+ }
+
+ err = pcim_enable_device(dev);
+ if (err < 0)
+ goto out_release_instance;
+
+ vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
+ if (!vmd->cfgbar) {
+ err = -ENOMEM;
+ goto out_release_instance;
+ }
+
+ pci_set_master(dev);
+ if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
+ err = -ENODEV;
+ goto out_release_instance;
+ }
+
+ if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
+ vmd->first_vec = 1;
+
+ spin_lock_init(&vmd->cfg_lock);
+ pci_set_drvdata(dev, vmd);
+ err = vmd_enable_domain(vmd, features);
+ if (err)
+ goto out_release_instance;
+
+ dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
+ vmd->sysdata.domain);
+ return 0;
+
+ out_release_instance:
+ ida_simple_remove(&vmd_instance_ida, vmd->instance);
+ return err;
+}
+
+static void vmd_cleanup_srcu(struct vmd_dev *vmd)
+{
+ int i;
+
+ for (i = 0; i < vmd->msix_count; i++)
+ cleanup_srcu_struct(&vmd->irqs[i].srcu);
+}
+
+static void vmd_remove(struct pci_dev *dev)
+{
+ struct vmd_dev *vmd = pci_get_drvdata(dev);
+
+ sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
+ pci_stop_root_bus(vmd->bus);
+ pci_remove_root_bus(vmd->bus);
+ vmd_cleanup_srcu(vmd);
+ vmd_detach_resources(vmd);
+ vmd_remove_irq_domain(vmd);
+ ida_simple_remove(&vmd_instance_ida, vmd->instance);
+}
+
+static void vmd_shutdown(struct pci_dev *dev)
+{
+ struct vmd_dev *vmd = pci_get_drvdata(dev);
+
+ vmd_remove_irq_domain(vmd);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vmd_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct vmd_dev *vmd = pci_get_drvdata(pdev);
+ int i;
+
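+ /*
+ * Hand back the demux handlers across suspend; vmd_resume()
+ * re-requests them.
+ */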
+ for (i = 0; i < vmd->msix_count; i++)
+ devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]);
+
+ return 0;
+}
+
+static int vmd_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct vmd_dev *vmd = pci_get_drvdata(pdev);
+ int err, i;
+
+ if (vmd->irq_domain)
+ vmd_set_msi_remapping(vmd, true);
+ else
+ vmd_set_msi_remapping(vmd, false);
+
+ for (i = 0; i < vmd->msix_count; i++) {
+ err = devm_request_irq(dev, vmd->irqs[i].virq,
+ vmd_irq, IRQF_NO_THREAD,
+ vmd->name, &vmd->irqs[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
+
+static const struct pci_device_id vmd_ids[] = {
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_CAN_BYPASS_MSI_REMAP,},
+ {PCI_VDEVICE(INTEL, 0x467f),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0x4c3d),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xa77f),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0x7d0b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xad0b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, vmd_ids);
+
+static struct pci_driver vmd_drv = {
+ .name = "vmd",
+ .id_table = vmd_ids,
+ .probe = vmd_probe,
+ .remove = vmd_remove,
+ .shutdown = vmd_shutdown,
+ .driver = {
+ .pm = &vmd_dev_pm_ops,
+ },
+};
+module_pci_driver(vmd_drv);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.6");