author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit:    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree:      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/irqchip
parent:    Initial commit. (diff)
download:  linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76. (ref: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/irqchip')
-rw-r--r--  drivers/irqchip/Kconfig | 699
-rw-r--r--  drivers/irqchip/Makefile | 123
-rw-r--r--  drivers/irqchip/alphascale_asm9260-icoll.h | 105
-rw-r--r--  drivers/irqchip/exynos-combiner.c | 272
-rw-r--r--  drivers/irqchip/irq-al-fic.c | 287
-rw-r--r--  drivers/irqchip/irq-alpine-msi.c | 292
-rw-r--r--  drivers/irqchip/irq-apple-aic.c | 1199
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 826
-rw-r--r--  drivers/irqchip/irq-aspeed-i2c-ic.c | 110
-rw-r--r--  drivers/irqchip/irq-aspeed-scu-ic.c | 237
-rw-r--r--  drivers/irqchip/irq-aspeed-vic.c | 221
-rw-r--r--  drivers/irqchip/irq-ath79-cpu.c | 94
-rw-r--r--  drivers/irqchip/irq-ath79-misc.c | 197
-rw-r--r--  drivers/irqchip/irq-atmel-aic-common.c | 275
-rw-r--r--  drivers/irqchip/irq-atmel-aic-common.h | 40
-rw-r--r--  drivers/irqchip/irq-atmel-aic.c | 274
-rw-r--r--  drivers/irqchip/irq-atmel-aic5.c | 408
-rw-r--r--  drivers/irqchip/irq-bcm2835.c | 263
-rw-r--r--  drivers/irqchip/irq-bcm2836.c | 343
-rw-r--r--  drivers/irqchip/irq-bcm6345-l1.c | 355
-rw-r--r--  drivers/irqchip/irq-bcm7038-l1.c | 462
-rw-r--r--  drivers/irqchip/irq-bcm7120-l2.c | 365
-rw-r--r--  drivers/irqchip/irq-brcmstb-l2.c | 296
-rw-r--r--  drivers/irqchip/irq-clps711x.c | 234
-rw-r--r--  drivers/irqchip/irq-crossbar.c | 366
-rw-r--r--  drivers/irqchip/irq-csky-apb-intc.c | 281
-rw-r--r--  drivers/irqchip/irq-csky-mpintc.c | 281
-rw-r--r--  drivers/irqchip/irq-davinci-aintc.c | 163
-rw-r--r--  drivers/irqchip/irq-davinci-cp-intc.c | 260
-rw-r--r--  drivers/irqchip/irq-digicolor.c | 119
-rw-r--r--  drivers/irqchip/irq-dw-apb-ictl.c | 218
-rw-r--r--  drivers/irqchip/irq-ftintc010.c | 194
-rw-r--r--  drivers/irqchip/irq-gic-common.c | 150
-rw-r--r--  drivers/irqchip/irq-gic-common.h | 32
-rw-r--r--  drivers/irqchip/irq-gic-pm.c | 160
-rw-r--r--  drivers/irqchip/irq-gic-realview.c | 77
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c | 577
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c | 170
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-pci-msi.c | 202
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-platform-msi.c | 163
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 5619
-rw-r--r--  drivers/irqchip/irq-gic-v3-mbi.c | 335
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 2577
-rw-r--r--  drivers/irqchip/irq-gic-v4.c | 392
-rw-r--r--  drivers/irqchip/irq-gic.c | 1764
-rw-r--r--  drivers/irqchip/irq-goldfish-pic.c | 134
-rw-r--r--  drivers/irqchip/irq-hip04.c | 406
-rw-r--r--  drivers/irqchip/irq-i8259.c | 361
-rw-r--r--  drivers/irqchip/irq-idt3243x.c | 122
-rw-r--r--  drivers/irqchip/irq-imgpdc.c | 490
-rw-r--r--  drivers/irqchip/irq-imx-gpcv2.c | 290
-rw-r--r--  drivers/irqchip/irq-imx-intmux.c | 363
-rw-r--r--  drivers/irqchip/irq-imx-irqsteer.c | 316
-rw-r--r--  drivers/irqchip/irq-imx-mu-msi.c | 453
-rw-r--r--  drivers/irqchip/irq-ingenic-tcu.c | 186
-rw-r--r--  drivers/irqchip/irq-ingenic.c | 160
-rw-r--r--  drivers/irqchip/irq-ixp4xx.c | 285
-rw-r--r--  drivers/irqchip/irq-jcore-aic.c | 120
-rw-r--r--  drivers/irqchip/irq-keystone.c | 228
-rw-r--r--  drivers/irqchip/irq-loongarch-cpu.c | 154
-rw-r--r--  drivers/irqchip/irq-loongson-eiointc.c | 416
-rw-r--r--  drivers/irqchip/irq-loongson-htpic.c | 146
-rw-r--r--  drivers/irqchip/irq-loongson-htvec.c | 218
-rw-r--r--  drivers/irqchip/irq-loongson-liointc.c | 378
-rw-r--r--  drivers/irqchip/irq-loongson-pch-lpc.c | 205
-rw-r--r--  drivers/irqchip/irq-loongson-pch-msi.c | 293
-rw-r--r--  drivers/irqchip/irq-loongson-pch-pic.c | 378
-rw-r--r--  drivers/irqchip/irq-lpc32xx.c | 242
-rw-r--r--  drivers/irqchip/irq-ls-extirq.c | 232
-rw-r--r--  drivers/irqchip/irq-ls-scfg-msi.c | 433
-rw-r--r--  drivers/irqchip/irq-ls1x.c | 193
-rw-r--r--  drivers/irqchip/irq-madera.c | 254
-rw-r--r--  drivers/irqchip/irq-mbigen.c | 393
-rw-r--r--  drivers/irqchip/irq-mchp-eic.c | 280
-rw-r--r--  drivers/irqchip/irq-meson-gpio.c | 602
-rw-r--r--  drivers/irqchip/irq-mips-cpu.c | 288
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 855
-rw-r--r--  drivers/irqchip/irq-mmp.c | 551
-rw-r--r--  drivers/irqchip/irq-mscc-ocelot.c | 218
-rw-r--r--  drivers/irqchip/irq-mst-intc.c | 291
-rw-r--r--  drivers/irqchip/irq-mtk-cirq.c | 298
-rw-r--r--  drivers/irqchip/irq-mtk-sysirq.c | 235
-rw-r--r--  drivers/irqchip/irq-mvebu-gicp.c | 263
-rw-r--r--  drivers/irqchip/irq-mvebu-icu.c | 409
-rw-r--r--  drivers/irqchip/irq-mvebu-odmi.c | 235
-rw-r--r--  drivers/irqchip/irq-mvebu-pic.c | 200
-rw-r--r--  drivers/irqchip/irq-mvebu-sei.c | 500
-rw-r--r--  drivers/irqchip/irq-mxs.c | 239
-rw-r--r--  drivers/irqchip/irq-nvic.c | 136
-rw-r--r--  drivers/irqchip/irq-omap-intc.c | 394
-rw-r--r--  drivers/irqchip/irq-ompic.c | 202
-rw-r--r--  drivers/irqchip/irq-or1k-pic.c | 176
-rw-r--r--  drivers/irqchip/irq-orion.c | 206
-rw-r--r--  drivers/irqchip/irq-owl-sirq.c | 359
-rw-r--r--  drivers/irqchip/irq-partition-percpu.c | 241
-rw-r--r--  drivers/irqchip/irq-pic32-evic.c | 319
-rw-r--r--  drivers/irqchip/irq-pruss-intc.c | 661
-rw-r--r--  drivers/irqchip/irq-qcom-mpm.c | 461
-rw-r--r--  drivers/irqchip/irq-rda-intc.c | 107
-rw-r--r--  drivers/irqchip/irq-realtek-rtl.c | 174
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c | 611
-rw-r--r--  drivers/irqchip/irq-renesas-irqc.c | 273
-rw-r--r--  drivers/irqchip/irq-renesas-rza1.c | 284
-rw-r--r--  drivers/irqchip/irq-renesas-rzg2l.c | 393
-rw-r--r--  drivers/irqchip/irq-riscv-intc.c | 147
-rw-r--r--  drivers/irqchip/irq-sa11x0.c | 171
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c | 490
-rw-r--r--  drivers/irqchip/irq-sl28cpld.c | 96
-rw-r--r--  drivers/irqchip/irq-sni-exiu.c | 323
-rw-r--r--  drivers/irqchip/irq-sp7021-intc.c | 278
-rw-r--r--  drivers/irqchip/irq-st.c | 203
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c | 1027
-rw-r--r--  drivers/irqchip/irq-sun4i.c | 202
-rw-r--r--  drivers/irqchip/irq-sun6i-r.c | 381
-rw-r--r--  drivers/irqchip/irq-sunxi-nmi.c | 241
-rw-r--r--  drivers/irqchip/irq-tb10x.c | 185
-rw-r--r--  drivers/irqchip/irq-tegra.c | 359
-rw-r--r--  drivers/irqchip/irq-ti-sci-inta.c | 746
-rw-r--r--  drivers/irqchip/irq-ti-sci-intr.c | 306
-rw-r--r--  drivers/irqchip/irq-ts4800.c | 169
-rw-r--r--  drivers/irqchip/irq-uniphier-aidet.c | 252
-rw-r--r--  drivers/irqchip/irq-versatile-fpga.c | 246
-rw-r--r--  drivers/irqchip/irq-vf610-mscm-ir.c | 234
-rw-r--r--  drivers/irqchip/irq-vic.c | 516
-rw-r--r--  drivers/irqchip/irq-vt8500.c | 244
-rw-r--r--  drivers/irqchip/irq-wpcm450-aic.c | 162
-rw-r--r--  drivers/irqchip/irq-xilinx-intc.c | 250
-rw-r--r--  drivers/irqchip/irq-xtensa-mx.c | 185
-rw-r--r--  drivers/irqchip/irq-xtensa-pic.c | 109
-rw-r--r--  drivers/irqchip/irq-zevio.c | 120
-rw-r--r--  drivers/irqchip/irqchip.c | 64
-rw-r--r--  drivers/irqchip/qcom-irq-combiner.c | 276
-rw-r--r--  drivers/irqchip/qcom-pdc.c | 341
-rw-r--r--  drivers/irqchip/spear-shirq.c | 290
134 files changed, 49000 insertions, 0 deletions
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
new file mode 100644
index 000000000..a29a426e4
--- /dev/null
+++ b/drivers/irqchip/Kconfig
@@ -0,0 +1,699 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "IRQ chip support"
+
+config IRQCHIP
+ def_bool y
+ depends on (OF_IRQ || ACPI_GENERIC_GSI)
+
+config ARM_GIC
+ bool
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+
+config ARM_GIC_PM
+ bool
+ depends on PM
+ select ARM_GIC
+
+config ARM_GIC_MAX_NR
+ int
+ depends on ARM_GIC
+ default 2 if ARCH_REALVIEW
+ default 1
+
+config ARM_GIC_V2M
+ bool
+ depends on PCI
+ select ARM_GIC
+ select PCI_MSI
+
+config GIC_NON_BANKED
+ bool
+
+config ARM_GIC_V3
+ bool
+ select IRQ_DOMAIN_HIERARCHY
+ select PARTITION_PERCPU
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+ select HAVE_ARM_SMCCC_DISCOVERY
+
+config ARM_GIC_V3_ITS
+ bool
+ select GENERIC_MSI_IRQ_DOMAIN
+ default ARM_GIC_V3
+
+config ARM_GIC_V3_ITS_PCI
+ bool
+ depends on ARM_GIC_V3_ITS
+ depends on PCI
+ depends on PCI_MSI
+ default ARM_GIC_V3_ITS
+
+config ARM_GIC_V3_ITS_FSL_MC
+ bool
+ depends on ARM_GIC_V3_ITS
+ depends on FSL_MC_BUS
+ default ARM_GIC_V3_ITS
+
+config ARM_NVIC
+ bool
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_CHIP
+
+config ARM_VIC
+ bool
+ select IRQ_DOMAIN
+
+config ARM_VIC_NR
+ int
+ default 4 if ARCH_S5PV210
+ default 2
+ depends on ARM_VIC
+ help
+ The maximum number of VICs available in the system, for
+ power management.
+
+config ARMADA_370_XP_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select PCI_MSI if PCI
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+
+config ALPINE_MSI
+ bool
+ depends on PCI
+ select PCI_MSI
+ select GENERIC_IRQ_CHIP
+
+config AL_FIC
+ bool "Amazon's Annapurna Labs Fabric Interrupt Controller"
+ depends on OF || COMPILE_TEST
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ help
+ Support Amazon's Annapurna Labs Fabric Interrupt Controller.
+
+config ATMEL_AIC_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ select SPARSE_IRQ
+
+config ATMEL_AIC5_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ select SPARSE_IRQ
+
+config I8259
+ bool
+ select IRQ_DOMAIN
+
+config BCM6345_L1_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+
+config BCM7038_L1_IRQ
+ tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BRCMSTB || BMIPS_GENERIC
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+
+config BCM7120_L2_IRQ
+ tristate "Broadcom STB 7120-style L2 interrupt controller driver"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BRCMSTB || BMIPS_GENERIC
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
+config BRCMSTB_L2_IRQ
+ tristate "Broadcom STB generic L2 interrupt controller driver"
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
+config DAVINCI_AINTC
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
+config DAVINCI_CP_INTC
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
+config DW_APB_ICTL
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN_HIERARCHY
+
+config FARADAY_FTINTC010
+ bool
+ select IRQ_DOMAIN
+ select SPARSE_IRQ
+
+config HISILICON_IRQ_MBIGEN
+ bool
+ select ARM_GIC_V3
+ select ARM_GIC_V3_ITS
+
+config IMGPDC_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
+config IXP4XX_IRQ
+ bool
+ select IRQ_DOMAIN
+ select SPARSE_IRQ
+
+config MADERA_IRQ
+ tristate
+
+config IRQ_MIPS_CPU
+ bool
+ select GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_IPI if SMP && SYS_SUPPORTS_MULTITHREADING
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+
+config CLPS711X_IRQCHIP
+ bool
+ depends on ARCH_CLPS711X
+ select IRQ_DOMAIN
+ select SPARSE_IRQ
+ default y
+
+config OMPIC
+ bool
+
+config OR1K_PIC
+ bool
+ select IRQ_DOMAIN
+
+config OMAP_IRQCHIP
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
+config ORION_IRQCHIP
+ bool
+ select IRQ_DOMAIN
+
+config PIC32_EVIC
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
+config JCORE_AIC
+ bool "J-Core integrated AIC" if COMPILE_TEST
+ depends on OF
+ select IRQ_DOMAIN
+ help
+ Support for the J-Core integrated AIC.
+
+config RDA_INTC
+ bool
+ select IRQ_DOMAIN
+
+config RENESAS_INTC_IRQPIN
+ bool "Renesas INTC External IRQ Pin Support" if COMPILE_TEST
+ select IRQ_DOMAIN
+ help
+ Enable support for the Renesas Interrupt Controller for external
+ interrupt pins, as found on SH/R-Mobile and R-Car Gen1 SoCs.
+
+config RENESAS_IRQC
+ bool "Renesas R-Mobile APE6, R-Car Gen{2,3} and RZ/G{1,2} IRQC support" if COMPILE_TEST
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ help
+ Enable support for the Renesas Interrupt Controller for external
+ devices, as found on R-Mobile APE6, R-Car Gen{2,3} and RZ/G{1,2} SoCs.
+
+config RENESAS_RZA1_IRQC
+ bool "Renesas RZ/A1 IRQC support" if COMPILE_TEST
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Enable support for the Renesas RZ/A1 Interrupt Controller, to use up
+ to 8 external interrupts with configurable sense select.
+
+config RENESAS_RZG2L_IRQC
+ bool "Renesas RZ/G2L (and alike SoC) IRQC support" if COMPILE_TEST
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Enable support for the Renesas RZ/G2L (and alike SoC) Interrupt Controller
+ for external devices.
+
+config SL28CPLD_INTC
+ bool "Kontron sl28cpld IRQ controller"
+ depends on MFD_SL28CPLD=y || COMPILE_TEST
+ select REGMAP_IRQ
+ help
+ Interrupt controller driver for the board management controller
+ found on the Kontron sl28 CPLD.
+
+config ST_IRQCHIP
+ bool
+ select REGMAP
+ select MFD_SYSCON
+ help
+ Enables SysCfg Controlled IRQs on STi based platforms.
+
+config SUN4I_INTC
+ bool
+
+config SUN6I_R_INTC
+ bool
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
+
+config SUNXI_NMI_INTC
+ bool
+ select GENERIC_IRQ_CHIP
+
+config TB10X_IRQC
+ bool
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+
+config TS4800_IRQ
+ tristate "TS-4800 IRQ controller"
+ select IRQ_DOMAIN
+ depends on HAS_IOMEM
+ depends on SOC_IMX51 || COMPILE_TEST
+ help
+ Support for the TS-4800 FPGA IRQ controller
+
+config VERSATILE_FPGA_IRQ
+ bool
+ select IRQ_DOMAIN
+
+config VERSATILE_FPGA_IRQ_NR
+ int
+ default 4
+ depends on VERSATILE_FPGA_IRQ
+
+config XTENSA_MX
+ bool
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+
+config XILINX_INTC
+ bool "Xilinx Interrupt Controller IP"
+ depends on OF_ADDRESS
+ select IRQ_DOMAIN
+ help
+ Support for the Xilinx Interrupt Controller IP core.
+ This is used as a primary controller with MicroBlaze and can also
+ be used as a secondary chained controller on other platforms.
+
+config IRQ_CROSSBAR
+ bool
+ help
+ Support for a CROSSBAR IP that precedes the main interrupt controller.
+ The primary irqchip invokes the crossbar's callback, which in turn allocates
+ a free irq and configures the IP. Thus the peripheral interrupts are
+ routed to one of the free irqchip interrupt lines.
+
+config KEYSTONE_IRQ
+ tristate "Keystone 2 IRQ controller IP"
+ depends on ARCH_KEYSTONE
+ help
+ Support for Texas Instruments Keystone 2 IRQ controller IP which
+ is part of the Keystone 2 IPC mechanism
+
+config MIPS_GIC
+ bool
+ select GENERIC_IRQ_IPI if SMP
+ select IRQ_DOMAIN_HIERARCHY
+ select MIPS_CM
+
+config INGENIC_IRQ
+ bool
+ depends on MACH_INGENIC
+ default y
+
+config INGENIC_TCU_IRQ
+ bool "Ingenic JZ47xx TCU interrupt controller"
+ default MACH_INGENIC
+ depends on MIPS || COMPILE_TEST
+ select MFD_SYSCON
+ select GENERIC_IRQ_CHIP
+ help
+ Support for interrupts in the Timer/Counter Unit (TCU) of the Ingenic
+ JZ47xx SoCs.
+
+ If unsure, say N.
+
+config IMX_GPCV2
+ bool
+ select IRQ_DOMAIN
+ help
+ Enables the wakeup IRQs for IMX platforms with GPCv2 block
+
+config IRQ_MXS
+ def_bool y if MACH_ASM9260 || ARCH_MXS
+ select IRQ_DOMAIN
+ select STMP_DEVICE
+
+config MSCC_OCELOT_IRQ
+ bool
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+
+config MVEBU_GICP
+ bool
+
+config MVEBU_ICU
+ bool
+
+config MVEBU_ODMI
+ bool
+ select GENERIC_MSI_IRQ_DOMAIN
+
+config MVEBU_PIC
+ bool
+
+config MVEBU_SEI
+ bool
+
+config LS_EXTIRQ
+ def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+ select MFD_SYSCON
+
+config LS_SCFG_MSI
+ def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+ depends on PCI && PCI_MSI
+
+config PARTITION_PERCPU
+ bool
+
+config STM32_EXTI
+ bool
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+
+config QCOM_IRQ_COMBINER
+ bool "QCOM IRQ combiner support"
+ depends on ARCH_QCOM && ACPI
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to add support for the IRQ combiner devices embedded
+ in Qualcomm Technologies chips.
+
+config IRQ_UNIPHIER_AIDET
+ bool "UniPhier AIDET support" if COMPILE_TEST
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ default ARCH_UNIPHIER
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for the UniPhier AIDET (ARM Interrupt Detector).
+
+config MESON_IRQ_GPIO
+ tristate "Meson GPIO Interrupt Multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ default ARCH_MESON
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support Meson SoC Family GPIO Interrupt Multiplexer
+
+config GOLDFISH_PIC
+ bool "Goldfish programmable interrupt controller"
+ depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ help
+ Say yes here to enable Goldfish interrupt controller driver used
+ for Goldfish based virtual platforms.
+
+config QCOM_PDC
+ tristate "QCOM PDC"
+ depends on ARCH_QCOM
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Power Domain Controller driver to manage and configure wakeup
+ IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
+
+config QCOM_MPM
+ tristate "QCOM MPM"
+ depends on ARCH_QCOM
+ depends on MAILBOX
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ MSM Power Manager driver to manage and configure wakeup
+ IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
+
+config CSKY_MPINTC
+ bool
+ depends on CSKY
+ help
+ Say yes here to enable the C-SKY SMP interrupt controller driver used
+ on C-SKY SMP systems.
+ The controller is not MMIO-mapped; its registers live inside the CPU
+ and are accessed with ld/st instructions.
+
+config CSKY_APB_INTC
+ bool "C-SKY APB Interrupt Controller"
+ depends on CSKY
+ help
+ Say yes here to enable the C-SKY APB interrupt controller driver used
+ by single-core C-SKY SoC systems. The controller's registers are
+ accessed through the MMIO-mapped APB bus.
+
+config IMX_IRQSTEER
+ bool "i.MX IRQSTEER support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC
+ select IRQ_DOMAIN
+ help
+ Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
+
+config IMX_INTMUX
+ bool "i.MX INTMUX support" if COMPILE_TEST
+ default y if ARCH_MXC
+ select IRQ_DOMAIN
+ help
+ Support for the i.MX INTMUX interrupt multiplexer.
+
+config IMX_MU_MSI
+ tristate "i.MX MU used as MSI controller"
+ depends on OF && HAS_IOMEM
+ depends on ARCH_MXC || COMPILE_TEST
+ default m if ARCH_MXC
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_MSI_IRQ_DOMAIN
+ help
+ Provide a driver for the i.MX Messaging Unit block used as a
+ CPU-to-CPU MSI controller. This requires a specially crafted DT
+ to make use of this driver.
+
+ If unsure, say N
+
+config LS1X_IRQ
+ bool "Loongson-1 Interrupt Controller"
+ depends on MACH_LOONGSON32
+ default y
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+ help
+ Support for the Loongson-1 platform Interrupt Controller.
+
+config TI_SCI_INTR_IRQCHIP
+ bool
+ depends on TI_SCI_PROTOCOL
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This enables irqchip driver support for the K3 Interrupt Router over
+ the TI System Control Interface, available on some newer TI SoCs.
+ If you wish to use interrupt router irq resources managed by the
+ TI System Controller, say Y here. Otherwise, say N.
+
+config TI_SCI_INTA_IRQCHIP
+ bool
+ depends on TI_SCI_PROTOCOL
+ select IRQ_DOMAIN_HIERARCHY
+ select TI_SCI_INTA_MSI_DOMAIN
+ help
+ This enables irqchip driver support for the K3 Interrupt Aggregator over
+ the TI System Control Interface, available on some newer TI SoCs.
+ If you wish to use interrupt aggregator irq resources managed by the
+ TI System Controller, say Y here. Otherwise, say N.
+
+config TI_PRUSS_INTC
+ tristate
+ depends on TI_PRUSS
+ default TI_PRUSS
+ select IRQ_DOMAIN
+ help
+ This enables support for the PRU-ICSS Local Interrupt Controller
+ present within a PRU-ICSS subsystem present on various TI SoCs.
+ The PRUSS INTC enables various interrupts to be routed to multiple
+ different processors within the SoC.
+
+config RISCV_INTC
+ bool "RISC-V Local Interrupt Controller"
+ depends on RISCV
+ default y
+ help
+ This enables support for the per-HART local interrupt controller
+ found in standard RISC-V systems. The per-HART local interrupt
+ controller handles timer interrupts, software interrupts, and
+ hardware interrupts. Without a per-HART local interrupt controller,
+ a RISC-V system will be unable to handle any interrupts.
+
+ If you don't know what to do here, say Y.
+
+config SIFIVE_PLIC
+ bool "SiFive Platform-Level Interrupt Controller"
+ depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+ help
+ This enables support for the PLIC chip found in SiFive (and
+ potentially other) RISC-V systems. The PLIC controls devices
+ interrupts and connects them to each core's local interrupt
+ controller. Aside from timer and software interrupts, all other
+ interrupt sources are subordinate to the PLIC.
+
+ If you don't know what to do here, say Y.
+
+config EXYNOS_IRQ_COMBINER
+ bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST
+ depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST
+ help
+ Say yes here to add support for the IRQ combiner devices embedded
+ in Samsung Exynos chips.
+
+config IRQ_LOONGARCH_CPU
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select LOONGSON_LIOINTC
+ select LOONGSON_EIOINTC
+ select LOONGSON_PCH_PIC
+ select LOONGSON_PCH_MSI
+ select LOONGSON_PCH_LPC
+ help
+ Support for the LoongArch CPU Interrupt Controller. For details of
+ irq chip hierarchy on LoongArch platforms please read the document
+ Documentation/loongarch/irq-chip-model.rst.
+
+config LOONGSON_LIOINTC
+ bool "Loongson Local I/O Interrupt Controller"
+ depends on MACH_LOONGSON64
+ default y
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+ help
+ Support for the Loongson Local I/O Interrupt Controller.
+
+config LOONGSON_EIOINTC
+ bool "Loongson Extend I/O Interrupt Controller"
+ depends on LOONGARCH
+ depends on MACH_LOONGSON64
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_CHIP
+ help
+ Support for the Loongson3 Extend I/O Interrupt Vector Controller.
+
+config LOONGSON_HTPIC
+ bool "Loongson3 HyperTransport PIC Controller"
+ depends on MACH_LOONGSON64 && MIPS
+ default y
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+ help
+ Support for the Loongson-3 HyperTransport PIC Controller.
+
+config LOONGSON_HTVEC
+ bool "Loongson HyperTransport Interrupt Vector Controller"
+ depends on MACH_LOONGSON64
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for the Loongson HyperTransport Interrupt Vector Controller.
+
+config LOONGSON_PCH_PIC
+ bool "Loongson PCH PIC Controller"
+ depends on MACH_LOONGSON64
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
+ help
+ Support for the Loongson PCH PIC Controller.
+
+config LOONGSON_PCH_MSI
+ bool "Loongson PCH MSI Controller"
+ depends on MACH_LOONGSON64
+ depends on PCI
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ select PCI_MSI
+ help
+ Support for the Loongson PCH MSI Controller.
+
+config LOONGSON_PCH_LPC
+ bool "Loongson PCH LPC Controller"
+ depends on LOONGARCH
+ depends on MACH_LOONGSON64
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for the Loongson PCH LPC Controller.
+
+config MST_IRQ
+ bool "MStar Interrupt Controller"
+ depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST
+ default ARCH_MEDIATEK
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support MStar Interrupt Controller.
+
+config WPCM450_AIC
+ bool "Nuvoton WPCM450 Advanced Interrupt Controller"
+ depends on ARCH_WPCM450
+ help
+ Support for the interrupt controller in the Nuvoton WPCM450 BMC SoC.
+
+config IRQ_IDT3243X
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
+config APPLE_AIC
+ bool "Apple Interrupt Controller (AIC)"
+ depends on ARM64
+ depends on ARCH_APPLE || COMPILE_TEST
+ help
+ Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
+ such as the M1.
+
+config MCHP_EIC
+ bool "Microchip External Interrupt Controller"
+ depends on ARCH_AT91 || COMPILE_TEST
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for Microchip External Interrupt Controller.
+
+config SUNPLUS_SP7021_INTC
+ bool "Sunplus SP7021 interrupt controller" if COMPILE_TEST
+ default SOC_SP7021
+ help
+ Support for the Sunplus SP7021 Interrupt Controller IP core.
+ SP7021 SoC has 2 Chips: C-Chip & P-Chip. This is used as a
+ chained controller, routing all interrupt sources in the P-Chip to
+ the primary controller on C-Chip.
+
+endmenu
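
Most of the symbols above are hidden bools selected by platform Kconfig; the boolean drivers they build register themselves at early boot through IRQCHIP_DECLARE(), the same pattern used by exynos-combiner.c, irq-al-fic.c and irq-alpine-msi.c later in this diff. A minimal, hypothetical sketch of that pattern (the "foo" names and the "vendor,foo-intc" compatible are illustrative, not part of this commit):

#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Hypothetical OF-probed irqchip skeleton; not part of this commit. */
static int __init foo_intc_of_init(struct device_node *node,
				   struct device_node *parent)
{
	void __iomem *base = of_iomap(node, 0);	/* map controller registers */

	if (!base)
		return -ENOMEM;

	/* ... create an irq_domain and hook up chip callbacks here ... */
	return 0;
}
IRQCHIP_DECLARE(foo_intc, "vendor,foo-intc", foo_intc_of_init);

The IRQCHIP_DECLARE() entry is what ties a CONFIG_* symbol's object file (see the Makefile that follows) to a device-tree compatible string at boot.
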
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
new file mode 100644
index 000000000..87b49a109
--- /dev/null
+++ b/drivers/irqchip/Makefile
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IRQCHIP) += irqchip.o
+
+obj-$(CONFIG_AL_FIC) += irq-al-fic.o
+obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o
+obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
+obj-$(CONFIG_ATH79) += irq-ath79-misc.o
+obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
+obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
+obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o
+obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o
+obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
+obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o
+obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
+obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
+obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
+obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
+obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
+obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
+obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
+obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o
+obj-$(CONFIG_OMPIC) += irq-ompic.o
+obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o
+obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
+obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o
+obj-$(CONFIG_SUN4I_INTC) += irq-sun4i.o
+obj-$(CONFIG_SUN6I_R_INTC) += irq-sun6i-r.o
+obj-$(CONFIG_SUNXI_NMI_INTC) += irq-sunxi-nmi.o
+obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
+obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
+obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
+obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
+obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o
+obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
+obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
+obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
+obj-$(CONFIG_ARM_VIC) += irq-vic.o
+obj-$(CONFIG_ARMADA_370_XP_IRQ) += irq-armada-370-xp.o
+obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o
+obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
+obj-$(CONFIG_I8259) += irq-i8259.o
+obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
+obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o
+obj-$(CONFIG_IXP4XX_IRQ) += irq-ixp4xx.o
+obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o
+obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o
+obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
+obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
+obj-$(CONFIG_RENESAS_RZA1_IRQC) += irq-renesas-rza1.o
+obj-$(CONFIG_RENESAS_RZG2L_IRQC) += irq-renesas-rzg2l.o
+obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
+obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
+obj-$(CONFIG_ST_IRQCHIP) += irq-st.o
+obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
+obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o
+obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
+obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
+obj-$(CONFIG_XILINX_INTC) += irq-xilinx-intc.o
+obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
+obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
+obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o
+obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o
+obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
+obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
+obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o
+obj-$(CONFIG_MIPS_GIC) += irq-mips-gic.o
+obj-$(CONFIG_ARCH_MEDIATEK) += irq-mtk-sysirq.o irq-mtk-cirq.o
+obj-$(CONFIG_ARCH_DIGICOLOR) += irq-digicolor.o
+obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o
+obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
+obj-$(CONFIG_INGENIC_TCU_IRQ) += irq-ingenic-tcu.o
+obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
+obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
+obj-$(CONFIG_MSCC_OCELOT_IRQ) += irq-mscc-ocelot.o
+obj-$(CONFIG_MVEBU_GICP) += irq-mvebu-gicp.o
+obj-$(CONFIG_MVEBU_ICU) += irq-mvebu-icu.o
+obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
+obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o
+obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
+obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
+obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
+obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
+obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
+obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
+obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
+obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
+obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
+obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
+obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
+obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o
+obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
+obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
+obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
+obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
+obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
+obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
+obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o
+obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
+obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
+obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
+obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
+obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o
+obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o
+obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
+obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o
+obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
+obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o
+obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o
+obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o
+obj-$(CONFIG_LOONGSON_PCH_LPC) += irq-loongson-pch-lpc.o
+obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o
+obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o
+obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o
+obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
+obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
+obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o
+obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o
+obj-$(CONFIG_SUNPLUS_SP7021_INTC) += irq-sp7021-intc.o
diff --git a/drivers/irqchip/alphascale_asm9260-icoll.h b/drivers/irqchip/alphascale_asm9260-icoll.h
new file mode 100644
index 000000000..1ec59483a
--- /dev/null
+++ b/drivers/irqchip/alphascale_asm9260-icoll.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de>
+ */
+
+#ifndef _ALPHASCALE_ASM9260_ICOLL_H
+#define _ALPHASCALE_ASM9260_ICOLL_H
+
+#define ASM9260_NUM_IRQS 64
+/*
+ * This device provides 4 offsets for each register:
+ * 0x0 - plain read/write mode
+ * 0x4 - set mode (OR logic)
+ * 0x8 - clear mode (AND-NOT logic)
+ * 0xc - toggle mode (XOR logic)
+ */
+
+#define ASM9260_HW_ICOLL_VECTOR 0x0000
+/*
+ * bits 31:2
+ * This register presents the vector address for the interrupt currently
+ * active on the CPU IRQ input. Writing to this register notifies the
+ * interrupt collector that the interrupt service routine for the current
+ * interrupt has been entered.
+ * The exception trap should have a LDPC instruction from this address:
+ * LDPC ASM9260_HW_ICOLL_VECTOR_ADDR; IRQ exception at 0xffff0018
+ */
+
+/*
+ * The Interrupt Collector Level Acknowledge Register is used by software to
+ * indicate the completion of an interrupt on a specific level.
+ * This register is written at the very end of an interrupt service routine. If
+ * nesting is used then the CPU irq must be turned on before writing to this
+ * register to avoid a race condition in the CPU interrupt hardware.
+ */
+#define ASM9260_HW_ICOLL_LEVELACK 0x0010
+#define ASM9260_BM_LEVELn(nr) BIT(nr)
+
+#define ASM9260_HW_ICOLL_CTRL 0x0020
+/*
+ * ASM9260_BM_CTRL_SFTRST and ASM9260_BM_CTRL_CLKGATE are not available on
+ * asm9260.
+ */
+#define ASM9260_BM_CTRL_SFTRST BIT(31)
+#define ASM9260_BM_CTRL_CLKGATE BIT(30)
+/* disable interrupt level nesting */
+#define ASM9260_BM_CTRL_NO_NESTING BIT(19)
+/*
+ * Set this bit to one to enable the RISC32-style read side effect associated with
+ * the vector address register. In this mode, interrupt in-service is signaled
+ * by the read of the ASM9260_HW_ICOLL_VECTOR register to acquire the interrupt
+ * vector address. Set this bit to zero for normal operation, in which the ISR
+ * signals in-service explicitly by means of a write to the
+ * ASM9260_HW_ICOLL_VECTOR register.
+ * 0 - Must Write to Vector register to go in-service.
+ * 1 - Go in-service as a read side effect
+ */
+#define ASM9260_BM_CTRL_ARM_RSE_MODE BIT(18)
+#define ASM9260_BM_CTRL_IRQ_ENABLE BIT(16)
+
+#define ASM9260_HW_ICOLL_STAT_OFFSET 0x0030
+/*
+ * bits 5:0
+ * Vector number of current interrupt. Multiply by 4 and add to vector base
+ * address to obtain the value in ASM9260_HW_ICOLL_VECTOR.
+ */
+
+/*
+ * RAW0 and RAW1 provide a read-only view of the raw interrupt request lines
+ * coming from various parts of the chip. Their purpose is to improve diagnostic
+ * observability.
+ */
+#define ASM9260_HW_ICOLL_RAW0 0x0040
+#define ASM9260_HW_ICOLL_RAW1 0x0050
+
+#define ASM9260_HW_ICOLL_INTERRUPT0 0x0060
+#define ASM9260_HW_ICOLL_INTERRUPTn(n) (0x0060 + ((n) >> 2) * 0x10)
+/*
+ * WARNING: Modifying the priority of an enabled interrupt may result in
+ * undefined behavior.
+ */
+#define ASM9260_BM_INT_PRIORITY_MASK 0x3
+#define ASM9260_BM_INT_ENABLE BIT(2)
+#define ASM9260_BM_INT_SOFTIRQ BIT(3)
+
+#define ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n) (((n) & 0x3) << 3)
+#define ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(n) (1 << (2 + \
+ ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n)))
+
+#define ASM9260_HW_ICOLL_VBASE 0x0160
+/*
+ * bits 31:2
+ * This bitfield holds the upper 30 bits of the base address of the vector
+ * table.
+ */
+
+#define ASM9260_HW_ICOLL_CLEAR0 0x01d0
+#define ASM9260_HW_ICOLL_CLEAR1 0x01e0
+#define ASM9260_HW_ICOLL_CLEARn(n) (((n >> 5) * 0x10) \
+ + SET_REG)
+#define ASM9260_BM_CLEAR_BIT(n) BIT(n & 0x1f)
+
+/* Scratchpad */
+#define ASM9260_HW_ICOLL_UNDEF_VECTOR 0x01f0
+#endif
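
The 0x4/0x8 register aliases described at the top of this header let a driver set or clear individual enable bits without a read-modify-write, and the per-source register/bit come from the INTERRUPTn macros above. A hedged sketch of how an icoll-style driver might use them; SET_OFFSET, CLR_OFFSET and icoll_base are illustrative names, not defined by this header:

#include <linux/bits.h>
#include <linux/io.h>
#include "alphascale_asm9260-icoll.h"

#define SET_OFFSET	0x4	/* "set mode" register alias */
#define CLR_OFFSET	0x8	/* "clear mode" register alias */

static void __iomem *icoll_base;	/* mapped elsewhere, e.g. with of_iomap() */

static void icoll_unmask(unsigned int hwirq)
{
	/* Each INTERRUPTn register packs four sources, 8 bits per source. */
	writel(ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(hwirq),
	       icoll_base + ASM9260_HW_ICOLL_INTERRUPTn(hwirq) + SET_OFFSET);
}

static void icoll_mask(unsigned int hwirq)
{
	writel(ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(hwirq),
	       icoll_base + ASM9260_HW_ICOLL_INTERRUPTn(hwirq) + CLR_OFFSET);
}

Per the STAT register comment above, the value latched into ASM9260_HW_ICOLL_VECTOR is the vector base plus 4 * (stat & 0x3f), i.e. the vector number scaled to a table of 32-bit entries.
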
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
new file mode 100644
index 000000000..552aa04ff
--- /dev/null
+++ b/drivers/irqchip/exynos-combiner.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Combiner irqchip for EXYNOS
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define COMBINER_ENABLE_SET 0x0
+#define COMBINER_ENABLE_CLEAR 0x4
+#define COMBINER_INT_STATUS 0xC
+
+#define IRQ_IN_COMBINER 8
+
+static DEFINE_SPINLOCK(irq_controller_lock);
+
+struct combiner_chip_data {
+ unsigned int hwirq_offset;
+ unsigned int irq_mask;
+ void __iomem *base;
+ unsigned int parent_irq;
+#ifdef CONFIG_PM
+ u32 pm_save;
+#endif
+};
+
+static struct combiner_chip_data *combiner_data;
+static struct irq_domain *combiner_irq_domain;
+static unsigned int max_nr = 20;
+
+static inline void __iomem *combiner_base(struct irq_data *data)
+{
+ struct combiner_chip_data *combiner_data =
+ irq_data_get_irq_chip_data(data);
+
+ return combiner_data->base;
+}
+
+static void combiner_mask_irq(struct irq_data *data)
+{
+ u32 mask = 1 << (data->hwirq % 32);
+
+ writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
+}
+
+static void combiner_unmask_irq(struct irq_data *data)
+{
+ u32 mask = 1 << (data->hwirq % 32);
+
+ writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
+}
+
+static void combiner_handle_cascade_irq(struct irq_desc *desc)
+{
+ struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int combiner_irq;
+ unsigned long status;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ spin_lock(&irq_controller_lock);
+ status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
+ spin_unlock(&irq_controller_lock);
+ status &= chip_data->irq_mask;
+
+ if (status == 0)
+ goto out;
+
+ combiner_irq = chip_data->hwirq_offset + __ffs(status);
+ ret = generic_handle_domain_irq(combiner_irq_domain, combiner_irq);
+ if (unlikely(ret))
+ handle_bad_irq(desc);
+
+ out:
+ chained_irq_exit(chip, desc);
+}
+
+#ifdef CONFIG_SMP
+static int combiner_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
+ struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);
+
+ if (chip && chip->irq_set_affinity)
+ return chip->irq_set_affinity(data, mask_val, force);
+ else
+ return -EINVAL;
+}
+#endif
+
+static struct irq_chip combiner_chip = {
+ .name = "COMBINER",
+ .irq_mask = combiner_mask_irq,
+ .irq_unmask = combiner_unmask_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = combiner_set_affinity,
+#endif
+};
+
+static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
+ unsigned int irq)
+{
+ irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
+ combiner_data);
+}
+
+static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
+ unsigned int combiner_nr,
+ void __iomem *base, unsigned int irq)
+{
+ combiner_data->base = base;
+ combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
+ combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
+ combiner_data->parent_irq = irq;
+
+ /* Disable all interrupts */
+ writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
+}
+
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (irq_domain_get_of_node(d) != controller)
+ return -EINVAL;
+
+ if (intsize < 2)
+ return -EINVAL;
+
+ *out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
+ *out_type = 0;
+
+ return 0;
+}
+
+static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct combiner_chip_data *combiner_data = d->host_data;
+
+ irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
+ irq_set_chip_data(irq, &combiner_data[hw >> 3]);
+ irq_set_probe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops combiner_irq_domain_ops = {
+ .xlate = combiner_irq_domain_xlate,
+ .map = combiner_irq_domain_map,
+};
+
+static void __init combiner_init(void __iomem *combiner_base,
+ struct device_node *np)
+{
+ int i, irq;
+ unsigned int nr_irq;
+
+ nr_irq = max_nr * IRQ_IN_COMBINER;
+
+ combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
+ if (!combiner_data)
+ return;
+
+ combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
+ &combiner_irq_domain_ops, combiner_data);
+ if (WARN_ON(!combiner_irq_domain)) {
+ pr_warn("%s: irq domain init failed\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < max_nr; i++) {
+ irq = irq_of_parse_and_map(np, i);
+
+ combiner_init_one(&combiner_data[i], i,
+ combiner_base + (i >> 2) * 0x10, irq);
+ combiner_cascade_irq(&combiner_data[i], irq);
+ }
+}
+
+#ifdef CONFIG_PM
+
+/**
+ * combiner_suspend - save interrupt combiner state before suspend
+ *
+ * Save the interrupt enable set register for all combiner groups since
+ * the state is lost when the system enters into a sleep state.
+ *
+ */
+static int combiner_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < max_nr; i++)
+ combiner_data[i].pm_save =
+ readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);
+
+ return 0;
+}
+
+/**
+ * combiner_resume - restore interrupt combiner state after resume
+ *
+ * Restore the interrupt enable set register for all combiner groups since
+ * the state is lost when the system enters into a sleep state on suspend.
+ *
+ */
+static void combiner_resume(void)
+{
+ int i;
+
+ for (i = 0; i < max_nr; i++) {
+ writel_relaxed(combiner_data[i].irq_mask,
+ combiner_data[i].base + COMBINER_ENABLE_CLEAR);
+ writel_relaxed(combiner_data[i].pm_save,
+ combiner_data[i].base + COMBINER_ENABLE_SET);
+ }
+}
+
+#else
+#define combiner_suspend NULL
+#define combiner_resume NULL
+#endif
+
+static struct syscore_ops combiner_syscore_ops = {
+ .suspend = combiner_suspend,
+ .resume = combiner_resume,
+};
+
+static int __init combiner_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ void __iomem *combiner_base;
+
+ combiner_base = of_iomap(np, 0);
+ if (!combiner_base) {
+ pr_err("%s: failed to map combiner registers\n", __func__);
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+ pr_info("%s: number of combiners not specified, "
+ "setting default as %d.\n",
+ __func__, max_nr);
+ }
+
+ combiner_init(combiner_base, np);
+
+ register_syscore_ops(&combiner_syscore_ops);
+
+ return 0;
+}
+IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
+ combiner_of_init);
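
combiner_irq_domain_xlate() flattens the two-cell specifier <group pin> into hwirq = group * 8 + pin, and combiner_irq_domain_map() selects the backing combiner_chip_data with hw >> 3. A small standalone illustration of that arithmetic; the group/pin values are made up:

#include <stdio.h>

#define IRQ_IN_COMBINER 8

int main(void)
{
	unsigned int group = 23, pin = 3;	/* e.g. DT: interrupts = <23 3>; */
	unsigned long hwirq = group * IRQ_IN_COMBINER + pin;	/* what xlate returns */
	unsigned int chip = hwirq >> 3;		/* index into combiner_data[] in map */

	printf("hwirq=%lu combiner=%u bit=%lu\n", hwirq, chip, hwirq % 32);
	return 0;
}

With group 23 and pin 3 this gives hwirq 187, combiner_data[23], and bit 27 of that group's 32-bit enable/status register (combiner_mask_irq() uses 1 << (hwirq % 32)).
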
diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c
new file mode 100644
index 000000000..886de028a
--- /dev/null
+++ b/drivers/irqchip/irq-al-fic.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+/* FIC Registers */
+#define AL_FIC_CAUSE 0x00
+#define AL_FIC_SET_CAUSE 0x08
+#define AL_FIC_MASK 0x10
+#define AL_FIC_CONTROL 0x28
+
+#define CONTROL_TRIGGER_RISING BIT(3)
+#define CONTROL_MASK_MSI_X BIT(5)
+
+#define NR_FIC_IRQS 32
+
+MODULE_AUTHOR("Talel Shenhar");
+MODULE_DESCRIPTION("Amazon's Annapurna Labs Interrupt Controller Driver");
+MODULE_LICENSE("GPL v2");
+
+enum al_fic_state {
+ AL_FIC_UNCONFIGURED = 0,
+ AL_FIC_CONFIGURED_LEVEL,
+ AL_FIC_CONFIGURED_RISING_EDGE,
+};
+
+struct al_fic {
+ void __iomem *base;
+ struct irq_domain *domain;
+ const char *name;
+ unsigned int parent_irq;
+ enum al_fic_state state;
+};
+
+static void al_fic_set_trigger(struct al_fic *fic,
+ struct irq_chip_generic *gc,
+ enum al_fic_state new_state)
+{
+ irq_flow_handler_t handler;
+ u32 control = readl_relaxed(fic->base + AL_FIC_CONTROL);
+
+ if (new_state == AL_FIC_CONFIGURED_LEVEL) {
+ handler = handle_level_irq;
+ control &= ~CONTROL_TRIGGER_RISING;
+ } else {
+ handler = handle_edge_irq;
+ control |= CONTROL_TRIGGER_RISING;
+ }
+ gc->chip_types->handler = handler;
+ fic->state = new_state;
+ writel_relaxed(control, fic->base + AL_FIC_CONTROL);
+}
+
+static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct al_fic *fic = gc->private;
+ enum al_fic_state new_state;
+ int ret = 0;
+
+ irq_gc_lock(gc);
+
+ if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) &&
+ ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) {
+ pr_debug("fic doesn't support flow type %d\n", flow_type);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ?
+ AL_FIC_CONFIGURED_LEVEL : AL_FIC_CONFIGURED_RISING_EDGE;
+
+ /*
+ * A given FIC instance can be either all level or all edge triggered.
+ * This is generally fixed depending on what pieces of HW it's wired up
+ * to.
+ *
+ * We configure it based on the sensitivity of the first source
+ * being setup, and reject any subsequent attempt at configuring it in a
+ * different way.
+ */
+ if (fic->state == AL_FIC_UNCONFIGURED) {
+ al_fic_set_trigger(fic, gc, new_state);
+ } else if (fic->state != new_state) {
+ pr_debug("fic %s state already configured to %d\n",
+ fic->name, fic->state);
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ irq_gc_unlock(gc);
+
+ return ret;
+}
+
+static void al_fic_irq_handler(struct irq_desc *desc)
+{
+ struct al_fic *fic = irq_desc_get_handler_data(desc);
+ struct irq_domain *domain = fic->domain;
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
+ unsigned long pending;
+ u32 hwirq;
+
+ chained_irq_enter(irqchip, desc);
+
+ pending = readl_relaxed(fic->base + AL_FIC_CAUSE);
+ pending &= ~gc->mask_cache;
+
+ for_each_set_bit(hwirq, &pending, NR_FIC_IRQS)
+ generic_handle_domain_irq(domain, hwirq);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int al_fic_irq_retrigger(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct al_fic *fic = gc->private;
+
+ writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
+
+ return 1;
+}
+
+static int al_fic_register(struct device_node *node,
+ struct al_fic *fic)
+{
+ struct irq_chip_generic *gc;
+ int ret;
+
+ fic->domain = irq_domain_add_linear(node,
+ NR_FIC_IRQS,
+ &irq_generic_chip_ops,
+ fic);
+ if (!fic->domain) {
+ pr_err("fail to add irq domain\n");
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(fic->domain,
+ NR_FIC_IRQS,
+ 1, fic->name,
+ handle_level_irq,
+ 0, 0, IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("fail to allocate generic chip (%d)\n", ret);
+ goto err_domain_remove;
+ }
+
+ gc = irq_get_domain_generic_chip(fic->domain, 0);
+ gc->reg_base = fic->base;
+ gc->chip_types->regs.mask = AL_FIC_MASK;
+ gc->chip_types->regs.ack = AL_FIC_CAUSE;
+ gc->chip_types->chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
+ gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
+ gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;
+ gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
+ gc->private = fic;
+
+ irq_set_chained_handler_and_data(fic->parent_irq,
+ al_fic_irq_handler,
+ fic);
+ return 0;
+
+err_domain_remove:
+ irq_domain_remove(fic->domain);
+
+ return ret;
+}
+
+/*
+ * al_fic_wire_init() - initialize and configure fic in wire mode
+ * @node: optional pointer to the interrupt controller's device tree node.
+ * @base: mmio to fic register
+ * @name: name of the fic
+ * @parent_irq: interrupt of parent
+ *
+ * This API will configure the fic hardware to work in wire mode.
+ * In wire mode, the fic hardware generates a wire ("wired") interrupt.
+ * The interrupt can be generated on a positive edge or by level - which of the
+ * two applies is determined by the hardware connected to this fic.
+ */
+static struct al_fic *al_fic_wire_init(struct device_node *node,
+ void __iomem *base,
+ const char *name,
+ unsigned int parent_irq)
+{
+ struct al_fic *fic;
+ int ret;
+ u32 control = CONTROL_MASK_MSI_X;
+
+ fic = kzalloc(sizeof(*fic), GFP_KERNEL);
+ if (!fic)
+ return ERR_PTR(-ENOMEM);
+
+ fic->base = base;
+ fic->parent_irq = parent_irq;
+ fic->name = name;
+
+ /* mask out all interrupts */
+ writel_relaxed(0xFFFFFFFF, fic->base + AL_FIC_MASK);
+
+ /* clear any pending interrupt */
+ writel_relaxed(0, fic->base + AL_FIC_CAUSE);
+
+ writel_relaxed(control, fic->base + AL_FIC_CONTROL);
+
+ ret = al_fic_register(node, fic);
+ if (ret) {
+ pr_err("fail to register irqchip\n");
+ goto err_free;
+ }
+
+ pr_debug("%s initialized successfully in Legacy mode (parent-irq=%u)\n",
+ fic->name, parent_irq);
+
+ return fic;
+
+err_free:
+ kfree(fic);
+ return ERR_PTR(ret);
+}
+
+static int __init al_fic_init_dt(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+ void __iomem *base;
+ unsigned int parent_irq;
+ struct al_fic *fic;
+
+ if (!parent) {
+ pr_err("%s: unsupported - device require a parent\n",
+ node->name);
+ return -EINVAL;
+ }
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s: fail to map memory\n", node->name);
+ return -ENOMEM;
+ }
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ pr_err("%s: fail to map irq\n", node->name);
+ ret = -EINVAL;
+ goto err_unmap;
+ }
+
+ fic = al_fic_wire_init(node,
+ base,
+ node->name,
+ parent_irq);
+ if (IS_ERR(fic)) {
+ pr_err("%s: fail to initialize irqchip (%lu)\n",
+ node->name,
+ PTR_ERR(fic));
+ ret = PTR_ERR(fic);
+ goto err_irq_dispose;
+ }
+
+ return 0;
+
+err_irq_dispose:
+ irq_dispose_mapping(parent_irq);
+err_unmap:
+ iounmap(base);
+
+ return ret;
+}
+
+IRQCHIP_DECLARE(al_fic, "amazon,al-fic", al_fic_init_dt);
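
Because al_fic_irq_set_type() latches the trigger mode of the first line that gets configured and rejects any later mismatch, all consumers behind one FIC instance must request the same sense. A hedged consumer-side sketch; the driver, device and handler names are invented:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical peripheral behind a FIC line; not part of this commit. */
static irqreturn_t foo_isr(int irq, void *dev_id)
{
	/* quiesce the device's interrupt source here */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/*
	 * IRQF_TRIGGER_HIGH selects AL_FIC_CONFIGURED_LEVEL above; a later
	 * request with IRQF_TRIGGER_RISING on the same FIC instance would be
	 * refused by al_fic_irq_set_type() with -EINVAL.
	 */
	return devm_request_irq(&pdev->dev, irq, foo_isr,
				IRQF_TRIGGER_HIGH, "foo", pdev);
}
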
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
new file mode 100644
index 000000000..fc1ef7de3
--- /dev/null
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -0,0 +1,292 @@
+/*
+ * Annapurna Labs MSIX support services
+ *
+ * Copyright (C) 2016, Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <asm/irq.h>
+#include <asm/msi.h>
+
+/* MSIX message address format: local GIC target */
+#define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16)
+
+struct alpine_msix_data {
+ spinlock_t msi_map_lock;
+ phys_addr_t addr;
+ u32 spi_first; /* The SPI number at which MSIs start */
+ u32 num_spis; /* The number of SPIs reserved for MSIs */
+ unsigned long *msi_map;
+};
+
+static void alpine_msix_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void alpine_msix_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip alpine_msix_irq_chip = {
+ .name = "MSIx",
+ .irq_mask = alpine_msix_mask_msi_irq,
+ .irq_unmask = alpine_msix_unmask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int alpine_msix_allocate_sgi(struct alpine_msix_data *priv, int num_req)
+{
+ int first;
+
+ spin_lock(&priv->msi_map_lock);
+
+ first = bitmap_find_next_zero_area(priv->msi_map, priv->num_spis, 0,
+ num_req, 0);
+ if (first >= priv->num_spis) {
+ spin_unlock(&priv->msi_map_lock);
+ return -ENOSPC;
+ }
+
+ bitmap_set(priv->msi_map, first, num_req);
+
+ spin_unlock(&priv->msi_map_lock);
+
+ return priv->spi_first + first;
+}
+
+static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned sgi,
+ int num_req)
+{
+ int first = sgi - priv->spi_first;
+
+ spin_lock(&priv->msi_map_lock);
+
+ bitmap_clear(priv->msi_map, first, num_req);
+
+ spin_unlock(&priv->msi_map_lock);
+}
+
+static void alpine_msix_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct alpine_msix_data *priv = irq_data_get_irq_chip_data(data);
+ phys_addr_t msg_addr = priv->addr;
+
+ msg_addr |= (data->hwirq << 3);
+
+ msg->address_hi = upper_32_bits(msg_addr);
+ msg->address_lo = lower_32_bits(msg_addr);
+ msg->data = 0;
+}
+
+static struct msi_domain_info alpine_msix_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX,
+ .chip = &alpine_msix_irq_chip,
+};
+
+static struct irq_chip middle_irq_chip = {
+ .name = "alpine_msix_middle",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = alpine_msix_compose_msi_msg,
+};
+
+static int alpine_msix_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, int sgi)
+{
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
+ if (!is_of_node(domain->parent->fwnode))
+ return -EINVAL;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = sgi;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+
+ return 0;
+}
+
+static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct alpine_msix_data *priv = domain->host_data;
+ int sgi, err, i;
+
+ sgi = alpine_msix_allocate_sgi(priv, nr_irqs);
+ if (sgi < 0)
+ return sgi;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = alpine_msix_gic_domain_alloc(domain, virq + i, sgi + i);
+ if (err)
+ goto err_sgi;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, sgi + i,
+ &middle_irq_chip, priv);
+ }
+
+ return 0;
+
+err_sgi:
+ irq_domain_free_irqs_parent(domain, virq, i - 1);
+ alpine_msix_free_sgi(priv, sgi, nr_irqs);
+ return err;
+}
+
+static void alpine_msix_middle_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct alpine_msix_data *priv = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ alpine_msix_free_sgi(priv, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops alpine_msix_middle_domain_ops = {
+ .alloc = alpine_msix_middle_domain_alloc,
+ .free = alpine_msix_middle_domain_free,
+};
+
+static int alpine_msix_init_domains(struct alpine_msix_data *priv,
+ struct device_node *node)
+{
+ struct irq_domain *middle_domain, *msi_domain, *gic_domain;
+ struct device_node *gic_node;
+
+ gic_node = of_irq_find_parent(node);
+ if (!gic_node) {
+ pr_err("Failed to find the GIC node\n");
+ return -ENODEV;
+ }
+
+ gic_domain = irq_find_host(gic_node);
+ of_node_put(gic_node);
+ if (!gic_domain) {
+ pr_err("Failed to find the GIC domain\n");
+ return -ENXIO;
+ }
+
+ middle_domain = irq_domain_add_tree(NULL,
+ &alpine_msix_middle_domain_ops,
+ priv);
+ if (!middle_domain) {
+ pr_err("Failed to create the MSIX middle domain\n");
+ return -ENOMEM;
+ }
+
+ middle_domain->parent = gic_domain;
+
+ msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ &alpine_msix_domain_info,
+ middle_domain);
+ if (!msi_domain) {
+ pr_err("Failed to create MSI domain\n");
+ irq_domain_remove(middle_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int alpine_msix_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct alpine_msix_data *priv;
+ struct resource res;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->msi_map_lock);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("Failed to allocate resource\n");
+ goto err_priv;
+ }
+
+ /*
+ * The 20 least significant bits of addr provide direct information
+ * regarding the interrupt destination.
+ *
+ * To select the primary GIC as the target GIC, bits [18:17] must be set
+ * to 0x0. In this case, bit 16 (SPI_TARGET_CLUSTER0) must be set.
+ */
+ priv->addr = res.start & GENMASK_ULL(63,20);
+ priv->addr |= ALPINE_MSIX_SPI_TARGET_CLUSTER0;
+
+ if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) {
+ pr_err("Unable to parse MSI base\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) {
+ pr_err("Unable to parse MSI numbers\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ priv->msi_map = bitmap_zalloc(priv->num_spis, GFP_KERNEL);
+ if (!priv->msi_map) {
+ ret = -ENOMEM;
+ goto err_priv;
+ }
+
+ pr_debug("Registering %d msixs, starting at %d\n",
+ priv->num_spis, priv->spi_first);
+
+ ret = alpine_msix_init_domains(priv, node);
+ if (ret)
+ goto err_map;
+
+ return 0;
+
+err_map:
+ bitmap_free(priv->msi_map);
+err_priv:
+ kfree(priv);
+ return ret;
+}
+IRQCHIP_DECLARE(alpine_msix, "al,alpine-msix", alpine_msix_init);
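
alpine_msix_compose_msi_msg() builds the doorbell address from the 1 MiB-aligned MMIO base, the SPI_TARGET_CLUSTER0 bit and the allocated SPI shifted left by 3, with the MSI data always zero. A small standalone illustration of that math; the base address and SPI number are invented, not real Alpine values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0xfbe00000ULL & ~((1ULL << 20) - 1);	/* GENMASK_ULL(63, 20) */
	uint64_t addr = base | (1ULL << 16);	/* ALPINE_MSIX_SPI_TARGET_CLUSTER0 */
	unsigned int spi = 96;			/* hwirq returned by the allocator */

	addr |= (uint64_t)spi << 3;

	printf("address_hi=0x%08x address_lo=0x%08x data=0\n",
	       (unsigned int)(addr >> 32), (unsigned int)(addr & 0xffffffff));
	return 0;
}
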
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
new file mode 100644
index 000000000..1c2813ad8
--- /dev/null
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -0,0 +1,1199 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright The Asahi Linux Contributors
+ *
+ * Based on irq-lpc32xx:
+ * Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
+ * Based on irq-bcm2836:
+ * Copyright 2015 Broadcom
+ */
+
+/*
+ * AIC is a fairly simple interrupt controller with the following features:
+ *
+ * - 896 level-triggered hardware IRQs
+ * - Single mask bit per IRQ
+ * - Per-IRQ affinity setting
+ * - Automatic masking on event delivery (auto-ack)
+ * - Software triggering (ORed with hw line)
+ * - 2 per-CPU IPIs (meant as "self" and "other", but they are
+ * interchangeable if not symmetric)
+ * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
+ * higher priority)
+ * - Automatic masking on ack
+ * - Default "this CPU" register view and explicit per-CPU views
+ *
+ * In addition, this driver also handles FIQs, as these are routed to the same
+ * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
+ * performance counters (TODO).
+ *
+ * Implementation notes:
+ *
+ * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
+ * and one for IPIs.
+ * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
+ * and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
+ * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
+ * - DT bindings use 3-cell form (like GIC):
+ * - <0 nr flags> - hwirq #nr
+ * - <1 nr flags> - FIQ #nr
+ * - nr=0 Physical HV timer
+ * - nr=1 Virtual HV timer
+ * - nr=2 Physical guest timer
+ * - nr=3 Virtual guest timer
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/cpuhotplug.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-vgic-info.h>
+#include <linux/irqdomain.h>
+#include <linux/jump_label.h>
+#include <linux/limits.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <asm/apple_m1_pmu.h>
+#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/sysreg.h>
+#include <asm/virt.h>
+
+#include <dt-bindings/interrupt-controller/apple-aic.h>
+
+/*
+ * AIC v1 registers (MMIO)
+ */
+
+#define AIC_INFO 0x0004
+#define AIC_INFO_NR_IRQ GENMASK(15, 0)
+
+#define AIC_CONFIG 0x0010
+
+#define AIC_WHOAMI 0x2000
+#define AIC_EVENT 0x2004
+#define AIC_EVENT_DIE GENMASK(31, 24)
+#define AIC_EVENT_TYPE GENMASK(23, 16)
+#define AIC_EVENT_NUM GENMASK(15, 0)
+
+#define AIC_EVENT_TYPE_FIQ 0 /* Software use */
+#define AIC_EVENT_TYPE_IRQ 1
+#define AIC_EVENT_TYPE_IPI 4
+#define AIC_EVENT_IPI_OTHER 1
+#define AIC_EVENT_IPI_SELF 2
+
+#define AIC_IPI_SEND 0x2008
+#define AIC_IPI_ACK 0x200c
+#define AIC_IPI_MASK_SET 0x2024
+#define AIC_IPI_MASK_CLR 0x2028
+
+#define AIC_IPI_SEND_CPU(cpu) BIT(cpu)
+
+#define AIC_IPI_OTHER BIT(0)
+#define AIC_IPI_SELF BIT(31)
+
+#define AIC_TARGET_CPU 0x3000
+
+#define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7))
+#define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7))
+#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
+#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
+
+#define AIC_MAX_IRQ 0x400
+
+/*
+ * AIC v2 registers (MMIO)
+ */
+
+#define AIC2_VERSION 0x0000
+#define AIC2_VERSION_VER GENMASK(7, 0)
+
+#define AIC2_INFO1 0x0004
+#define AIC2_INFO1_NR_IRQ GENMASK(15, 0)
+#define AIC2_INFO1_LAST_DIE GENMASK(27, 24)
+
+#define AIC2_INFO2 0x0008
+
+#define AIC2_INFO3 0x000c
+#define AIC2_INFO3_MAX_IRQ GENMASK(15, 0)
+#define AIC2_INFO3_MAX_DIE GENMASK(27, 24)
+
+#define AIC2_RESET 0x0010
+#define AIC2_RESET_RESET BIT(0)
+
+#define AIC2_CONFIG 0x0014
+#define AIC2_CONFIG_ENABLE BIT(0)
+#define AIC2_CONFIG_PREFER_PCPU BIT(28)
+
+#define AIC2_TIMEOUT 0x0028
+#define AIC2_CLUSTER_PRIO 0x0030
+#define AIC2_DELAY_GROUPS 0x0100
+
+#define AIC2_IRQ_CFG 0x2000
+
+/*
+ * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
+ *
+ * Repeat for each die:
+ * IRQ_CFG: u32 * MAX_IRQS
+ * SW_SET: u32 * (MAX_IRQS / 32)
+ * SW_CLR: u32 * (MAX_IRQS / 32)
+ * MASK_SET: u32 * (MAX_IRQS / 32)
+ * MASK_CLR: u32 * (MAX_IRQS / 32)
+ * HW_STATE: u32 * (MAX_IRQS / 32)
+ *
+ * This is followed by a set of event registers, each 16K page aligned.
+ * The first one is the AP event register we will use. Unfortunately,
+ * the actual implemented die count is not specified anywhere in the
+ * capability registers, so we have to explicitly specify the event
+ * register as a second reg entry in the device tree to remain
+ * forward-compatible.
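+ *
+ * This means each die spans MAX_IRQS * 4 bytes of IRQ_CFG followed by
+ * five bitmaps of MAX_IRQS / 32 words each, i.e. a die_stride of
+ * 4 * MAX_IRQS + 5 * 4 * (MAX_IRQS / 32) bytes, which is how the offset
+ * computation in aic_of_ic_init() below arrives at it.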
+ */
+
+#define AIC2_IRQ_CFG_TARGET GENMASK(3, 0)
+#define AIC2_IRQ_CFG_DELAY_IDX GENMASK(7, 5)
+
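+/*
+ * The mask bitmaps are arrays of 32-bit words. For example, hwirq bit 70
+ * lives at byte offset MASK_REG(70) = 4 * (70 >> 5) = 8 within a bitmap,
+ * as bit MASK_BIT(70) = BIT(70 & 31) = BIT(6).
+ */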
+#define MASK_REG(x) (4 * ((x) >> 5))
+#define MASK_BIT(x) BIT((x) & GENMASK(4, 0))
+
+/*
+ * IMP-DEF sysregs that control FIQ sources
+ */
+
+/* IPI request registers */
+#define SYS_IMP_APL_IPI_RR_LOCAL_EL1 sys_reg(3, 5, 15, 0, 0)
+#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1 sys_reg(3, 5, 15, 0, 1)
+#define IPI_RR_CPU GENMASK(7, 0)
+/* Cluster only used for the GLOBAL register */
+#define IPI_RR_CLUSTER GENMASK(23, 16)
+#define IPI_RR_TYPE GENMASK(29, 28)
+#define IPI_RR_IMMEDIATE 0
+#define IPI_RR_RETRACT 1
+#define IPI_RR_DEFERRED 2
+#define IPI_RR_NOWAKE 3
+
+/* IPI status register */
+#define SYS_IMP_APL_IPI_SR_EL1 sys_reg(3, 5, 15, 1, 1)
+#define IPI_SR_PENDING BIT(0)
+
+/* Guest timer FIQ enable register */
+#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2 sys_reg(3, 5, 15, 1, 3)
+#define VM_TMR_FIQ_ENABLE_V BIT(0)
+#define VM_TMR_FIQ_ENABLE_P BIT(1)
+
+/* Deferred IPI countdown register */
+#define SYS_IMP_APL_IPI_CR_EL1 sys_reg(3, 5, 15, 3, 1)
+
+/* Uncore PMC control register */
+#define SYS_IMP_APL_UPMCR0_EL1 sys_reg(3, 7, 15, 0, 4)
+#define UPMCR0_IMODE GENMASK(18, 16)
+#define UPMCR0_IMODE_OFF 0
+#define UPMCR0_IMODE_AIC 2
+#define UPMCR0_IMODE_HALT 3
+#define UPMCR0_IMODE_FIQ 4
+
+/* Uncore PMC status register */
+#define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4)
+#define UPMSR_IACT BIT(0)
+
+/* MPIDR fields */
+#define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0)
+#define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1)
+
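+/*
+ * hwirq numbers encode the die, event type and index. For example,
+ * AIC_IRQ_HWIRQ(1, 5) is 0x01010005 (die 1, AIC_EVENT_TYPE_IRQ, irq 5),
+ * and AIC_HWIRQ_DIE()/AIC_HWIRQ_IRQ() recover the 1 and the 5 from it.
+ */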
+#define AIC_IRQ_HWIRQ(die, irq) (FIELD_PREP(AIC_EVENT_DIE, die) | \
+ FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
+ FIELD_PREP(AIC_EVENT_NUM, irq))
+#define AIC_FIQ_HWIRQ(x) (FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
+ FIELD_PREP(AIC_EVENT_NUM, x))
+#define AIC_HWIRQ_IRQ(x) FIELD_GET(AIC_EVENT_NUM, x)
+#define AIC_HWIRQ_DIE(x) FIELD_GET(AIC_EVENT_DIE, x)
+#define AIC_NR_FIQ 6
+#define AIC_NR_SWIPI 32
+
+/*
+ * FIQ hwirq index definitions: FIQ sources use the DT binding defines
+ * directly, except that timers are special. At the irqchip level, the
+ * two timer types are represented by their access method: _EL0 registers
+ * or _EL02 registers. In the DT binding, the timers are represented
+ * by their purpose (HV or guest). This mapping is for when the kernel is
+ * running at EL2 (with VHE). When the kernel is running at EL1, the
+ * mapping differs and aic_irq_domain_translate() performs the remapping.
+ */
+
+#define AIC_TMR_EL0_PHYS AIC_TMR_HV_PHYS
+#define AIC_TMR_EL0_VIRT AIC_TMR_HV_VIRT
+#define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS
+#define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT
+
+static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
+
+struct aic_info {
+ int version;
+
+ /* Register offsets */
+ u32 event;
+ u32 target_cpu;
+ u32 irq_cfg;
+ u32 sw_set;
+ u32 sw_clr;
+ u32 mask_set;
+ u32 mask_clr;
+
+ u32 die_stride;
+
+ /* Features */
+ bool fast_ipi;
+};
+
+static const struct aic_info aic1_info = {
+ .version = 1,
+
+ .event = AIC_EVENT,
+ .target_cpu = AIC_TARGET_CPU,
+};
+
+static const struct aic_info aic1_fipi_info = {
+ .version = 1,
+
+ .event = AIC_EVENT,
+ .target_cpu = AIC_TARGET_CPU,
+
+ .fast_ipi = true,
+};
+
+static const struct aic_info aic2_info = {
+ .version = 2,
+
+ .irq_cfg = AIC2_IRQ_CFG,
+
+ .fast_ipi = true,
+};
+
+static const struct of_device_id aic_info_match[] = {
+ {
+ .compatible = "apple,t8103-aic",
+ .data = &aic1_fipi_info,
+ },
+ {
+ .compatible = "apple,aic",
+ .data = &aic1_info,
+ },
+ {
+ .compatible = "apple,aic2",
+ .data = &aic2_info,
+ },
+ {}
+};
+
+struct aic_irq_chip {
+ void __iomem *base;
+ void __iomem *event;
+ struct irq_domain *hw_domain;
+ struct irq_domain *ipi_domain;
+ struct {
+ cpumask_t aff;
+ } *fiq_aff[AIC_NR_FIQ];
+
+ int nr_irq;
+ int max_irq;
+ int nr_die;
+ int max_die;
+
+ struct aic_info info;
+};
+
+static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
+
+static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
+static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);
+
+static struct aic_irq_chip *aic_irqc;
+
+static void aic_handle_ipi(struct pt_regs *regs);
+
+static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
+{
+ return readl_relaxed(ic->base + reg);
+}
+
+static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
+{
+ writel_relaxed(val, ic->base + reg);
+}
+
+/*
+ * IRQ irqchip
+ */
+
+static void aic_irq_mask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
+ u32 irq = AIC_HWIRQ_IRQ(hwirq);
+
+ aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
+}
+
+static void aic_irq_unmask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
+ u32 irq = AIC_HWIRQ_IRQ(hwirq);
+
+ aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
+}
+
+static void aic_irq_eoi(struct irq_data *d)
+{
+ /*
+ * Reading the interrupt reason automatically acknowledges and masks
+ * the IRQ, so we just unmask it here if needed.
+ */
+ if (!irqd_irq_masked(d))
+ aic_irq_unmask(d);
+}
+
+static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
+{
+ struct aic_irq_chip *ic = aic_irqc;
+ u32 event, type, irq;
+
+ do {
+ /*
+ * We cannot use a relaxed read here, as reads from DMA buffers
+ * need to be ordered after the IRQ fires.
+ */
+ event = readl(ic->event + ic->info.event);
+ type = FIELD_GET(AIC_EVENT_TYPE, event);
+ irq = FIELD_GET(AIC_EVENT_NUM, event);
+
+ if (type == AIC_EVENT_TYPE_IRQ)
+ generic_handle_domain_irq(aic_irqc->hw_domain, event);
+ else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
+ aic_handle_ipi(regs);
+ else if (event != 0)
+ pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
+ } while (event);
+
+ /*
+ * vGIC maintenance interrupts end up here too, so we need to check
+ * for them separately. This should never trigger if KVM is working
+ * properly, because it will have already taken care of clearing it
+ * on guest exit before this handler runs.
+ */
+ if (is_kernel_in_hyp_mode() && (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
+ read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
+ pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
+ sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
+ }
+}
+
+static int aic_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ int cpu;
+
+ BUG_ON(!ic->info.target_cpu);
+
+ if (force)
+ cpu = cpumask_first(mask_val);
+ else
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
+ aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int aic_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ /*
+ * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
+ * have a way to find out the type of any given IRQ, so just allow both.
+ */
+ return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
+}
+
+static struct irq_chip aic_chip = {
+ .name = "AIC",
+ .irq_mask = aic_irq_mask,
+ .irq_unmask = aic_irq_unmask,
+ .irq_eoi = aic_irq_eoi,
+ .irq_set_affinity = aic_irq_set_affinity,
+ .irq_set_type = aic_irq_set_type,
+};
+
+static struct irq_chip aic2_chip = {
+ .name = "AIC2",
+ .irq_mask = aic_irq_mask,
+ .irq_unmask = aic_irq_unmask,
+ .irq_eoi = aic_irq_eoi,
+ .irq_set_type = aic_irq_set_type,
+};
+
+/*
+ * FIQ irqchip
+ */
+
+static unsigned long aic_fiq_get_idx(struct irq_data *d)
+{
+ return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
+}
+
+static void aic_fiq_set_mask(struct irq_data *d)
+{
+ /* Only the guest timers have real mask bits, unfortunately. */
+ switch (aic_fiq_get_idx(d)) {
+ case AIC_TMR_EL02_PHYS:
+ sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
+ isb();
+ break;
+ case AIC_TMR_EL02_VIRT:
+ sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
+ isb();
+ break;
+ default:
+ break;
+ }
+}
+
+static void aic_fiq_clear_mask(struct irq_data *d)
+{
+ switch (aic_fiq_get_idx(d)) {
+ case AIC_TMR_EL02_PHYS:
+ sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
+ isb();
+ break;
+ case AIC_TMR_EL02_VIRT:
+ sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
+ isb();
+ break;
+ default:
+ break;
+ }
+}
+
+static void aic_fiq_mask(struct irq_data *d)
+{
+ aic_fiq_set_mask(d);
+ __this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
+}
+
+static void aic_fiq_unmask(struct irq_data *d)
+{
+ aic_fiq_clear_mask(d);
+ __this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
+}
+
+static void aic_fiq_eoi(struct irq_data *d)
+{
+ /* We mask to ack (where we can), so we need to unmask at EOI. */
+ if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
+ aic_fiq_clear_mask(d);
+}
+
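+/*
+ * A timer is considered to be firing when it is enabled, its interrupt is
+ * not masked (IT_MASK clear) and its status bit (IT_STAT) is set.
+ */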
+#define TIMER_FIRING(x) \
+ (((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK | \
+ ARCH_TIMER_CTRL_IT_STAT)) == \
+ (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
+
+static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
+{
+ /*
+ * It would be really nice if we had a system register that lets us get
+ * the FIQ source state without having to peek down into sources...
+ * but such a register does not seem to exist.
+ *
+ * So, we have these potential sources to test for:
+ * - Fast IPIs (not yet used)
+ * - The 4 timers (CNTP, CNTV for each of HV and guest)
+ * - Per-core PMCs (not yet supported)
+ * - Per-cluster uncore PMCs (not yet supported)
+ *
+ * Since not dealing with any of these results in a FIQ storm,
+ * we check for everything here, even things we don't support yet.
+ */
+
+ if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
+ if (static_branch_likely(&use_fast_ipi)) {
+ aic_handle_ipi(regs);
+ } else {
+ pr_err_ratelimited("Fast IPI fired. Acking.\n");
+ write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+ }
+ }
+
+ if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));
+
+ if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));
+
+ if (is_kernel_in_hyp_mode()) {
+ uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);
+
+ if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
+ TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));
+
+ if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
+ TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
+ }
+
+ if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
+ int irq;
+ if (cpumask_test_cpu(smp_processor_id(),
+ &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
+ irq = AIC_CPU_PMU_P;
+ else
+ irq = AIC_CPU_PMU_E;
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ AIC_FIQ_HWIRQ(irq));
+ }
+
+ if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
+ (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
+ /* Same story with uncore PMCs */
+ pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
+ sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
+ FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
+ }
+}
+
+static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
+{
+ return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
+}
+
+static struct irq_chip fiq_chip = {
+ .name = "AIC-FIQ",
+ .irq_mask = aic_fiq_mask,
+ .irq_unmask = aic_fiq_unmask,
+ .irq_ack = aic_fiq_set_mask,
+ .irq_eoi = aic_fiq_eoi,
+ .irq_set_type = aic_fiq_set_type,
+};
+
+/*
+ * Main IRQ domain
+ */
+
+static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct aic_irq_chip *ic = id->host_data;
+ u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
+ struct irq_chip *chip = &aic_chip;
+
+ if (ic->info.version == 2)
+ chip = &aic2_chip;
+
+ if (type == AIC_EVENT_TYPE_IRQ) {
+ irq_domain_set_info(id, irq, hw, chip, id->host_data,
+ handle_fasteoi_irq, NULL, NULL);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+ } else {
+ int fiq = FIELD_GET(AIC_EVENT_NUM, hw);
+
+ switch (fiq) {
+ case AIC_CPU_PMU_P:
+ case AIC_CPU_PMU_E:
+ irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
+ break;
+ default:
+ irq_set_percpu_devid(irq);
+ break;
+ }
+
+ irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+ }
+
+ return 0;
+}
+
+static int aic_irq_domain_translate(struct irq_domain *id,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct aic_irq_chip *ic = id->host_data;
+ u32 *args;
+ u32 die = 0;
+
+ if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
+ !is_of_node(fwspec->fwnode))
+ return -EINVAL;
+
+ args = &fwspec->param[1];
+
+ if (fwspec->param_count == 4) {
+ die = args[0];
+ args++;
+ }
+
+ switch (fwspec->param[0]) {
+ case AIC_IRQ:
+ if (die >= ic->nr_die)
+ return -EINVAL;
+ if (args[0] >= ic->nr_irq)
+ return -EINVAL;
+ *hwirq = AIC_IRQ_HWIRQ(die, args[0]);
+ break;
+ case AIC_FIQ:
+ if (die != 0)
+ return -EINVAL;
+ if (args[0] >= AIC_NR_FIQ)
+ return -EINVAL;
+ *hwirq = AIC_FIQ_HWIRQ(args[0]);
+
+ /*
+ * In EL1 the non-redirected registers are the guest's,
+ * not EL2's, so remap the hwirqs to match.
+ */
+ if (!is_kernel_in_hyp_mode()) {
+ switch (args[0]) {
+ case AIC_TMR_GUEST_PHYS:
+ *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
+ break;
+ case AIC_TMR_GUEST_VIRT:
+ *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
+ break;
+ case AIC_TMR_HV_PHYS:
+ case AIC_TMR_HV_VIRT:
+ return -ENOENT;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *type = args[1] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+ int i, ret;
+
+ ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops aic_irq_domain_ops = {
+ .translate = aic_irq_domain_translate,
+ .alloc = aic_irq_domain_alloc,
+ .free = aic_irq_domain_free,
+};
+
+/*
+ * IPI irqchip
+ */
+
+static void aic_ipi_send_fast(int cpu)
+{
+ u64 mpidr = cpu_logical_map(cpu);
+ u64 my_mpidr = read_cpuid_mpidr();
+ u64 cluster = MPIDR_CLUSTER(mpidr);
+ u64 idx = MPIDR_CPU(mpidr);
+
+ if (MPIDR_CLUSTER(my_mpidr) == cluster)
+ write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
+ SYS_IMP_APL_IPI_RR_LOCAL_EL1);
+ else
+ write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
+ SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
+ isb();
+}
+
+static void aic_ipi_mask(struct irq_data *d)
+{
+ u32 irq_bit = BIT(irqd_to_hwirq(d));
+
+ /* No specific ordering requirements needed here. */
+ atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
+}
+
+static void aic_ipi_unmask(struct irq_data *d)
+{
+ struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 irq_bit = BIT(irqd_to_hwirq(d));
+
+ atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));
+
+ /*
+ * The atomic_or() above must complete before the atomic_read()
+ * below to avoid racing aic_ipi_send_mask().
+ */
+ smp_mb__after_atomic();
+
+ /*
+ * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
+ * No barriers needed here since this is a self-IPI.
+ */
+ if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
+ if (static_branch_likely(&use_fast_ipi))
+ aic_ipi_send_fast(smp_processor_id());
+ else
+ aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
+ }
+}
+
+static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
+{
+ struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 irq_bit = BIT(irqd_to_hwirq(d));
+ u32 send = 0;
+ int cpu;
+ unsigned long pending;
+
+ for_each_cpu(cpu, mask) {
+ /*
+ * This sequence is the mirror of the one in aic_ipi_unmask();
+ * see the comment there. Additionally, release semantics
+ * ensure that the vIPI flag set is ordered after any shared
+ * memory accesses that precede it. This therefore also pairs
+ * with the atomic_fetch_andnot in aic_handle_ipi().
+ */
+ pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));
+
+ /*
+ * The atomic_fetch_or_release() above must complete before the
+ * atomic_read() below to avoid racing aic_ipi_unmask().
+ */
+ smp_mb__after_atomic();
+
+ if (!(pending & irq_bit) &&
+ (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
+ if (static_branch_likely(&use_fast_ipi))
+ aic_ipi_send_fast(cpu);
+ else
+ send |= AIC_IPI_SEND_CPU(cpu);
+ }
+ }
+
+ /*
+ * The flag writes must complete before the physical IPI is issued
+ * to another CPU. This is implied by the control dependency on
+	 * the result of the atomic_read() above, which is itself
+ * already ordered after the vIPI flag write.
+ */
+ if (send)
+ aic_ic_write(ic, AIC_IPI_SEND, send);
+}
+
+static struct irq_chip ipi_chip = {
+ .name = "AIC-IPI",
+ .irq_mask = aic_ipi_mask,
+ .irq_unmask = aic_ipi_unmask,
+ .ipi_send_mask = aic_ipi_send_mask,
+};
+
+/*
+ * IPI IRQ domain
+ */
+
+static void aic_handle_ipi(struct pt_regs *regs)
+{
+ int i;
+ unsigned long enabled, firing;
+
+ /*
+ * Ack the IPI. We need to order this after the AIC event read, but
+ * that is enforced by normal MMIO ordering guarantees.
+ *
+ * For the Fast IPI case, this needs to be ordered before the vIPI
+ * handling below, so we need to isb();
+ */
+ if (static_branch_likely(&use_fast_ipi)) {
+ write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+ isb();
+ } else {
+ aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
+ }
+
+ /*
+ * The mask read does not need to be ordered. Only we can change
+ * our own mask anyway, so no races are possible here, as long as
+ * we are properly in the interrupt handler (which is covered by
+ * the barrier that is part of the top-level AIC handler's readl()).
+ */
+ enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));
+
+ /*
+ * Clear the IPIs we are about to handle. This pairs with the
+ * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
+ * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
+ * before IPI handling code (to avoid races handling vIPIs before they
+ * are signaled). The former is taken care of by the release semantics
+ * of the write portion, while the latter is taken care of by the
+ * acquire semantics of the read portion.
+ */
+ firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;
+
+ for_each_set_bit(i, &firing, AIC_NR_SWIPI)
+ generic_handle_domain_irq(aic_irqc->ipi_domain, i);
+
+ /*
+ * No ordering needed here; at worst this just changes the timing of
+ * when the next IPI will be delivered.
+ */
+ if (!static_branch_likely(&use_fast_ipi))
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
+}
+
+static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_set_percpu_devid(virq + i);
+ irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
+{
+ /* Not freeing IPIs */
+}
+
+static const struct irq_domain_ops aic_ipi_domain_ops = {
+ .alloc = aic_ipi_alloc,
+ .free = aic_ipi_free,
+};
+
+static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
+{
+ struct irq_domain *ipi_domain;
+ int base_ipi;
+
+ ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
+ &aic_ipi_domain_ops, irqc);
+ if (WARN_ON(!ipi_domain))
+ return -ENODEV;
+
+ ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
+ irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
+
+ base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
+ NUMA_NO_NODE, NULL, false, NULL);
+
+ if (WARN_ON(!base_ipi)) {
+ irq_domain_remove(ipi_domain);
+ return -ENODEV;
+ }
+
+ set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);
+
+ irqc->ipi_domain = ipi_domain;
+
+ return 0;
+}
+
+static int aic_init_cpu(unsigned int cpu)
+{
+ /* Mask all hard-wired per-CPU IRQ/FIQ sources */
+
+ /* Pending Fast IPI FIQs */
+ write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
+
+ /* Timer FIQs */
+ sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
+ sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
+
+ /* EL2-only (VHE mode) IRQ sources */
+ if (is_kernel_in_hyp_mode()) {
+ /* Guest timers */
+ sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
+ VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);
+
+ /* vGIC maintenance IRQ */
+ sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
+ }
+
+ /* PMC FIQ */
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
+ FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
+
+ /* Uncore PMC FIQ */
+ sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
+ FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
+
+ /* Commit all of the above */
+ isb();
+
+ if (aic_irqc->info.version == 1) {
+ /*
+		 * Make sure the kernel's idea of logical CPU order is the same as AIC's.
+ * If we ever end up with a mismatch here, we will have to introduce
+ * a mapping table similar to what other irqchip drivers do.
+ */
+ WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
+
+ /*
+ * Always keep IPIs unmasked at the hardware level (except auto-masking
+ * by AIC during processing). We manage masks at the vIPI level.
+		 * These registers only exist on AICv1; AICv2 always uses fast IPIs.
+ */
+ aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
+ if (static_branch_likely(&use_fast_ipi)) {
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
+ } else {
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
+ aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
+ }
+ }
+
+ /* Initialize the local mask state */
+ __this_cpu_write(aic_fiq_unmasked, 0);
+
+ return 0;
+}
+
+static struct gic_kvm_info vgic_info __initdata = {
+ .type = GIC_V3,
+ .no_maint_irq_mask = true,
+ .no_hw_deactivation = true,
+};
+
+static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
+{
+ int i, n;
+ u32 fiq;
+
+ if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
+ WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
+ return;
+
+ n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
+ if (WARN_ON(n < 0))
+ return;
+
+ ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
+ if (!ic->fiq_aff[fiq])
+ return;
+
+ for (i = 0; i < n; i++) {
+ struct device_node *cpu_node;
+ u32 cpu_phandle;
+ int cpu;
+
+ if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
+ continue;
+
+ cpu_node = of_find_node_by_phandle(cpu_phandle);
+ if (WARN_ON(!cpu_node))
+ continue;
+
+ cpu = of_cpu_node_to_id(cpu_node);
+ of_node_put(cpu_node);
+ if (WARN_ON(cpu < 0))
+ continue;
+
+ cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
+ }
+}
+
+static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
+{
+ int i, die;
+ u32 off, start_off;
+ void __iomem *regs;
+ struct aic_irq_chip *irqc;
+ struct device_node *affs;
+ const struct of_device_id *match;
+
+ regs = of_iomap(node, 0);
+ if (WARN_ON(!regs))
+ return -EIO;
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc) {
+ iounmap(regs);
+ return -ENOMEM;
+ }
+
+ irqc->base = regs;
+
+ match = of_match_node(aic_info_match, node);
+ if (!match)
+ goto err_unmap;
+
+ irqc->info = *(struct aic_info *)match->data;
+
+ aic_irqc = irqc;
+
+ switch (irqc->info.version) {
+ case 1: {
+ u32 info;
+
+ info = aic_ic_read(irqc, AIC_INFO);
+ irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
+ irqc->max_irq = AIC_MAX_IRQ;
+ irqc->nr_die = irqc->max_die = 1;
+
+ off = start_off = irqc->info.target_cpu;
+ off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */
+
+ irqc->event = irqc->base;
+
+ break;
+ }
+ case 2: {
+ u32 info1, info3;
+
+ info1 = aic_ic_read(irqc, AIC2_INFO1);
+ info3 = aic_ic_read(irqc, AIC2_INFO3);
+
+ irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
+ irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
+ irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
+ irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);
+
+ off = start_off = irqc->info.irq_cfg;
+ off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */
+
+ irqc->event = of_iomap(node, 1);
+ if (WARN_ON(!irqc->event))
+ goto err_unmap;
+
+ break;
+ }
+ }
+
+ irqc->info.sw_set = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
+ irqc->info.sw_clr = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
+ irqc->info.mask_set = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
+ irqc->info.mask_clr = off;
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
+ off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */
+
+ if (irqc->info.fast_ipi)
+ static_branch_enable(&use_fast_ipi);
+ else
+ static_branch_disable(&use_fast_ipi);
+
+ irqc->info.die_stride = off - start_off;
+
+ irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
+ &aic_irq_domain_ops, irqc);
+ if (WARN_ON(!irqc->hw_domain))
+ goto err_unmap;
+
+ irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);
+
+ if (aic_init_smp(irqc, node))
+ goto err_remove_domain;
+
+ affs = of_get_child_by_name(node, "affinities");
+ if (affs) {
+ struct device_node *chld;
+
+ for_each_child_of_node(affs, chld)
+ build_fiq_affinity(irqc, chld);
+ }
+ of_node_put(affs);
+
+ set_handle_irq(aic_handle_irq);
+ set_handle_fiq(aic_handle_fiq);
+
+ off = 0;
+ for (die = 0; die < irqc->nr_die; die++) {
+ for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
+ aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
+ for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
+ aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
+ if (irqc->info.target_cpu)
+ for (i = 0; i < irqc->nr_irq; i++)
+ aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
+ off += irqc->info.die_stride;
+ }
+
+ if (irqc->info.version == 2) {
+ u32 config = aic_ic_read(irqc, AIC2_CONFIG);
+
+ config |= AIC2_CONFIG_ENABLE;
+ aic_ic_write(irqc, AIC2_CONFIG, config);
+ }
+
+ if (!is_kernel_in_hyp_mode())
+ pr_info("Kernel running in EL1, mapping interrupts");
+
+ if (static_branch_likely(&use_fast_ipi))
+ pr_info("Using Fast IPIs");
+
+ cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
+ "irqchip/apple-aic/ipi:starting",
+ aic_init_cpu, NULL);
+
+ vgic_set_kvm_info(&vgic_info);
+
+ pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
+ irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);
+
+ return 0;
+
+err_remove_domain:
+ irq_domain_remove(irqc->hw_domain);
+err_unmap:
+ if (irqc->event && irqc->event != irqc->base)
+ iounmap(irqc->event);
+ iounmap(irqc->base);
+ kfree(irqc);
+ return -ENODEV;
+}
+
+IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
+IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
new file mode 100644
index 000000000..ee18eb3e7
--- /dev/null
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -0,0 +1,826 @@
+/*
+ * Marvell Armada 370 and Armada XP SoC IRQ handling
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Ben Dooks <ben.dooks@codethink.co.uk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/msi.h>
+#include <asm/mach/arch.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/mach/irq.h>
+
+/*
+ * Overall diagram of the Armada XP interrupt controller:
+ *
+ * To CPU 0 To CPU 1
+ *
+ * /\ /\
+ * || ||
+ * +---------------+ +---------------+
+ * | | | |
+ * | per-CPU | | per-CPU |
+ * | mask/unmask | | mask/unmask |
+ * | CPU0 | | CPU1 |
+ * | | | |
+ * +---------------+ +---------------+
+ * /\ /\
+ * || ||
+ * \\_______________________//
+ * ||
+ * +-------------------+
+ * | |
+ * | Global interrupt |
+ * | mask/unmask |
+ * | |
+ * +-------------------+
+ * /\
+ * ||
+ * interrupt from
+ * device
+ *
+ * The "global interrupt mask/unmask" is modified using the
+ * ARMADA_370_XP_INT_SET_ENABLE_OFFS and
+ * ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS registers, which are relative
+ * to "main_int_base".
+ *
+ * The "per-CPU mask/unmask" is modified using the
+ * ARMADA_370_XP_INT_SET_MASK_OFFS and
+ * ARMADA_370_XP_INT_CLEAR_MASK_OFFS registers, which are relative to
+ * "per_cpu_int_base". This base address points to a special address,
+ * which automatically accesses the registers of the current CPU.
+ *
+ * The per-CPU mask/unmask can also be adjusted using the global
+ * per-interrupt ARMADA_370_XP_INT_SOURCE_CTL register, which we use
+ * to configure interrupt affinity.
+ *
+ * Due to this model, all interrupts need to be mask/unmasked at two
+ * different levels: at the global level and at the per-CPU level.
+ *
+ * This driver takes the following approach to deal with this:
+ *
+ * - For global interrupts:
+ *
+ * At ->map() time, a global interrupt is unmasked at the per-CPU
+ * mask/unmask level. It is therefore unmasked at this level for
+ *   the current CPU running the ->map() code. This ensures the
+ *   interrupt is unmasked at this level in non-SMP configurations.
+ *   In SMP configurations, the ->set_affinity() callback is called,
+ *   which readjusts the per-CPU mask/unmask for the interrupt using
+ *   ARMADA_370_XP_INT_SOURCE_CTL().
+ *
+ * The ->mask() and ->unmask() operations only mask/unmask the
+ * interrupt at the "global" level.
+ *
+ * So, a global interrupt is enabled at the per-CPU level as soon
+ * as it is mapped. At run time, the masking/unmasking takes place
+ * at the global level.
+ *
+ * - For per-CPU interrupts
+ *
+ * At ->map() time, a per-CPU interrupt is unmasked at the global
+ * mask/unmask level.
+ *
+ * The ->mask() and ->unmask() operations mask/unmask the interrupt
+ * at the per-CPU level.
+ *
+ * So, a per-CPU interrupt is enabled at the global level as soon
+ * as it is mapped. At run time, the masking/unmasking takes place
+ * at the per-CPU level.
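+ *
+ * In short:
+ *
+ *                        at ->map() time             at run time
+ *   global interrupts    unmasked at per-CPU level   masked/unmasked globally
+ *   per-CPU interrupts   unmasked at global level    masked/unmasked per-CPU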
+ */
+
+/* Registers relative to main_int_base */
+#define ARMADA_370_XP_INT_CONTROL (0x00)
+#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x04)
+#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
+#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
+#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
+#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
+#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid)
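+/*
+ * For example, ARMADA_370_XP_INT_IRQ_FIQ_MASK(1) covers bits 1 and 9 of an
+ * INT_SOURCE_CTL register, i.e. the per-CPU IRQ and FIQ mask bits for CPU 1.
+ */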
+
+/* Registers relative to per_cpu_int_base */
+#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x08)
+#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0x0c)
+#define ARMADA_375_PPI_CAUSE (0x10)
+#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
+#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
+#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
+#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54)
+#define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu)
+
+#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
+
+#define IPI_DOORBELL_START (0)
+#define IPI_DOORBELL_END (8)
+#define IPI_DOORBELL_MASK 0xFF
+#define PCI_MSI_DOORBELL_START (16)
+#define PCI_MSI_DOORBELL_NR (16)
+#define PCI_MSI_DOORBELL_END (32)
+#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
+
+static void __iomem *per_cpu_int_base;
+static void __iomem *main_int_base;
+static struct irq_domain *armada_370_xp_mpic_domain;
+static u32 doorbell_mask_reg;
+static int parent_irq;
+#ifdef CONFIG_PCI_MSI
+static struct irq_domain *armada_370_xp_msi_domain;
+static struct irq_domain *armada_370_xp_msi_inner_domain;
+static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
+static DEFINE_MUTEX(msi_used_lock);
+static phys_addr_t msi_doorbell_addr;
+#endif
+
+static inline bool is_percpu_irq(irq_hw_number_t irq)
+{
+ if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
+ return true;
+
+ return false;
+}
+
+/*
+ * In SMP mode:
+ * For shared global interrupts, mask/unmask global enable bit
+ * For CPU interrupts, mask/unmask the calling CPU's bit
+ */
+static void armada_370_xp_irq_mask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ if (!is_percpu_irq(hwirq))
+ writel(hwirq, main_int_base +
+ ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+ else
+ writel(hwirq, per_cpu_int_base +
+ ARMADA_370_XP_INT_SET_MASK_OFFS);
+}
+
+static void armada_370_xp_irq_unmask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ if (!is_percpu_irq(hwirq))
+ writel(hwirq, main_int_base +
+ ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ else
+ writel(hwirq, per_cpu_int_base +
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+#ifdef CONFIG_PCI_MSI
+
+static struct irq_chip armada_370_xp_msi_irq_chip = {
+ .name = "MPIC MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info armada_370_xp_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .chip = &armada_370_xp_msi_irq_chip,
+};
+
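+/*
+ * For example, with PCI_MSI_DOORBELL_START = 16, hwirq 3 aimed at CPU 1 is
+ * composed as data 0x213 = BIT(9) | 19: bit 9 selects the CPU in the
+ * software-triggered interrupt register and the low byte selects doorbell
+ * 19, which armada_370_xp_handle_msi_irq() maps back to hwirq 3.
+ */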
+static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
+
+ msg->address_lo = lower_32_bits(msi_doorbell_addr);
+ msg->address_hi = upper_32_bits(msi_doorbell_addr);
+ msg->data = BIT(cpu + 8) | (data->hwirq + PCI_MSI_DOORBELL_START);
+}
+
+static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ unsigned int cpu;
+
+ if (!force)
+ cpu = cpumask_any_and(mask, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
+ .name = "MPIC MSI",
+ .irq_compose_msi_msg = armada_370_xp_compose_msi_msg,
+ .irq_set_affinity = armada_370_xp_msi_set_affinity,
+};
+
+static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ int hwirq, i;
+
+ mutex_lock(&msi_used_lock);
+ hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
+ order_base_2(nr_irqs));
+ mutex_unlock(&msi_used_lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &armada_370_xp_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void armada_370_xp_msi_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ mutex_lock(&msi_used_lock);
+ bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
+ mutex_unlock(&msi_used_lock);
+}
+
+static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
+ .alloc = armada_370_xp_msi_alloc,
+ .free = armada_370_xp_msi_free,
+};
+
+static void armada_370_xp_msi_reenable_percpu(void)
+{
+ u32 reg;
+
+ /* Enable MSI doorbell mask and combined cpu local interrupt */
+ reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
+ | PCI_MSI_DOORBELL_MASK;
+ writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ /* Unmask local doorbell interrupt */
+ writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+static int armada_370_xp_msi_init(struct device_node *node,
+ phys_addr_t main_int_phys_base)
+{
+ msi_doorbell_addr = main_int_phys_base +
+ ARMADA_370_XP_SW_TRIG_INT_OFFS;
+
+ armada_370_xp_msi_inner_domain =
+ irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
+ &armada_370_xp_msi_domain_ops, NULL);
+ if (!armada_370_xp_msi_inner_domain)
+ return -ENOMEM;
+
+ armada_370_xp_msi_domain =
+ pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ &armada_370_xp_msi_domain_info,
+ armada_370_xp_msi_inner_domain);
+ if (!armada_370_xp_msi_domain) {
+ irq_domain_remove(armada_370_xp_msi_inner_domain);
+ return -ENOMEM;
+ }
+
+ armada_370_xp_msi_reenable_percpu();
+
+ return 0;
+}
+#else
+static void armada_370_xp_msi_reenable_percpu(void) {}
+
+static inline int armada_370_xp_msi_init(struct device_node *node,
+ phys_addr_t main_int_phys_base)
+{
+ return 0;
+}
+#endif
+
+static void armada_xp_mpic_perf_init(void)
+{
+ unsigned long cpuid;
+
+ /*
+	 * This Performance Counter Overflow interrupt is specific to
+ * Armada 370 and XP. It is not available on Armada 375, 38x and 39x.
+ */
+ if (!of_machine_is_compatible("marvell,armada-370-xp"))
+ return;
+
+ cpuid = cpu_logical_map(smp_processor_id());
+
+ /* Enable Performance Counter Overflow interrupts */
+ writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
+ per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
+}
+
+#ifdef CONFIG_SMP
+static struct irq_domain *ipi_domain;
+
+static void armada_370_xp_ipi_mask(struct irq_data *d)
+{
+ u32 reg;
+ reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ reg &= ~BIT(d->hwirq);
+ writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+}
+
+static void armada_370_xp_ipi_unmask(struct irq_data *d)
+{
+ u32 reg;
+ reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ reg |= BIT(d->hwirq);
+ writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+}
+
+static void armada_370_xp_ipi_send_mask(struct irq_data *d,
+ const struct cpumask *mask)
+{
+ unsigned long map = 0;
+ int cpu;
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= 1 << cpu_logical_map(cpu);
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb();
+
+	/* submit the software-triggered interrupt (doorbell) */
+ writel((map << 8) | d->hwirq, main_int_base +
+ ARMADA_370_XP_SW_TRIG_INT_OFFS);
+}
+
+static void armada_370_xp_ipi_ack(struct irq_data *d)
+{
+ writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+}
+
+static struct irq_chip ipi_irqchip = {
+ .name = "IPI",
+ .irq_ack = armada_370_xp_ipi_ack,
+ .irq_mask = armada_370_xp_ipi_mask,
+ .irq_unmask = armada_370_xp_ipi_unmask,
+ .ipi_send_mask = armada_370_xp_ipi_send_mask,
+};
+
+static int armada_370_xp_ipi_alloc(struct irq_domain *d,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_set_percpu_devid(virq + i);
+ irq_domain_set_info(d, virq + i, i, &ipi_irqchip,
+ d->host_data,
+ handle_percpu_devid_irq,
+ NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void armada_370_xp_ipi_free(struct irq_domain *d,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ /* Not freeing IPIs */
+}
+
+static const struct irq_domain_ops ipi_domain_ops = {
+ .alloc = armada_370_xp_ipi_alloc,
+ .free = armada_370_xp_ipi_free,
+};
+
+static void ipi_resume(void)
+{
+ int i;
+
+ for (i = 0; i < IPI_DOORBELL_END; i++) {
+ int irq;
+
+ irq = irq_find_mapping(ipi_domain, i);
+ if (irq <= 0)
+ continue;
+ if (irq_percpu_is_enabled(irq)) {
+ struct irq_data *d;
+ d = irq_domain_get_irq_data(ipi_domain, irq);
+ armada_370_xp_ipi_unmask(d);
+ }
+ }
+}
+
+static __init void armada_xp_ipi_init(struct device_node *node)
+{
+ int base_ipi;
+
+ ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ IPI_DOORBELL_END,
+ &ipi_domain_ops, NULL);
+ if (WARN_ON(!ipi_domain))
+ return;
+
+ irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
+ base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, IPI_DOORBELL_END,
+ NUMA_NO_NODE, NULL, false, NULL);
+ if (WARN_ON(!base_ipi))
+ return;
+
+ set_smp_ipi_range(base_ipi, IPI_DOORBELL_END);
+}
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+static int armada_xp_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long reg, mask;
+ int cpu;
+
+ /* Select a single core from the affinity mask which is online */
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ mask = 1UL << cpu_logical_map(cpu);
+
+ raw_spin_lock(&irq_controller_lock);
+ reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+ reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
+ writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+ raw_spin_unlock(&irq_controller_lock);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK;
+}
+
+static void armada_xp_mpic_smp_cpu_init(void)
+{
+ u32 control;
+ int nr_irqs, i;
+
+ control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+ nr_irqs = (control >> 2) & 0x3ff;
+
+ for (i = 0; i < nr_irqs; i++)
+ writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+
+ /* Disable all IPIs */
+ writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+ /* Clear pending IPIs */
+ writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ /* Unmask IPI interrupt */
+ writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+static void armada_xp_mpic_reenable_percpu(void)
+{
+ unsigned int irq;
+
+ /* Re-enable per-CPU interrupts that were enabled before suspend */
+ for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) {
+ struct irq_data *data;
+ int virq;
+
+ virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
+ if (virq == 0)
+ continue;
+
+ data = irq_get_irq_data(virq);
+
+ if (!irq_percpu_is_enabled(virq))
+ continue;
+
+ armada_370_xp_irq_unmask(data);
+ }
+
+ ipi_resume();
+
+ armada_370_xp_msi_reenable_percpu();
+}
+
+static int armada_xp_mpic_starting_cpu(unsigned int cpu)
+{
+ armada_xp_mpic_perf_init();
+ armada_xp_mpic_smp_cpu_init();
+ armada_xp_mpic_reenable_percpu();
+ return 0;
+}
+
+static int mpic_cascaded_starting_cpu(unsigned int cpu)
+{
+ armada_xp_mpic_perf_init();
+ armada_xp_mpic_reenable_percpu();
+ enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+ return 0;
+}
+#else
+static void armada_xp_mpic_smp_cpu_init(void) {}
+static void ipi_resume(void) {}
+#endif
+
+static struct irq_chip armada_370_xp_irq_chip = {
+ .name = "MPIC",
+ .irq_mask = armada_370_xp_irq_mask,
+ .irq_mask_ack = armada_370_xp_irq_mask,
+ .irq_unmask = armada_370_xp_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = armada_xp_set_affinity,
+#endif
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ armada_370_xp_irq_mask(irq_get_irq_data(virq));
+ if (!is_percpu_irq(hw))
+ writel(hw, per_cpu_int_base +
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ else
+ writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+
+ if (is_percpu_irq(hw)) {
+ irq_set_percpu_devid(virq);
+ irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+ handle_percpu_devid_irq);
+ } else {
+ irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+ handle_level_irq);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+ }
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
+ .map = armada_370_xp_mpic_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+#ifdef CONFIG_PCI_MSI
+static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
+{
+ u32 msimask, msinr;
+
+ msimask = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+ & PCI_MSI_DOORBELL_MASK;
+
+ writel(~msimask, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ for (msinr = PCI_MSI_DOORBELL_START;
+ msinr < PCI_MSI_DOORBELL_END; msinr++) {
+ unsigned int irq;
+
+ if (!(msimask & BIT(msinr)))
+ continue;
+
+ irq = msinr - PCI_MSI_DOORBELL_START;
+
+ generic_handle_domain_irq(armada_370_xp_msi_inner_domain, irq);
+ }
+}
+#else
+static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
+#endif
+
+static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long irqmap, irqn, irqsrc, cpuid;
+
+ chained_irq_enter(chip, desc);
+
+ irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
+ cpuid = cpu_logical_map(smp_processor_id());
+
+ for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
+ irqsrc = readl_relaxed(main_int_base +
+ ARMADA_370_XP_INT_SOURCE_CTL(irqn));
+
+		/* Check if the interrupt is not masked on the current CPU.
+ * Test IRQ (0-1) and FIQ (8-9) mask bits.
+ */
+ if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
+ continue;
+
+ if (irqn == 1) {
+ armada_370_xp_handle_msi_irq(NULL, true);
+ continue;
+ }
+
+ generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void __exception_irq_entry
+armada_370_xp_handle_irq(struct pt_regs *regs)
+{
+ u32 irqstat, irqnr;
+
+ do {
+ irqstat = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_CPU_INTACK_OFFS);
+ irqnr = irqstat & 0x3FF;
+
+ if (irqnr > 1022)
+ break;
+
+ if (irqnr > 1) {
+ generic_handle_domain_irq(armada_370_xp_mpic_domain,
+ irqnr);
+ continue;
+ }
+
+ /* MSI handling */
+ if (irqnr == 1)
+ armada_370_xp_handle_msi_irq(regs, false);
+
+#ifdef CONFIG_SMP
+ /* IPI Handling */
+ if (irqnr == 0) {
+ unsigned long ipimask;
+ int ipi;
+
+ ipimask = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+ & IPI_DOORBELL_MASK;
+
+ for_each_set_bit(ipi, &ipimask, IPI_DOORBELL_END)
+ generic_handle_domain_irq(ipi_domain, ipi);
+ }
+#endif
+
+ } while (1);
+}
+
+static int armada_370_xp_mpic_suspend(void)
+{
+ doorbell_mask_reg = readl(per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ return 0;
+}
+
+static void armada_370_xp_mpic_resume(void)
+{
+ int nirqs;
+ irq_hw_number_t irq;
+
+ /* Re-enable interrupts */
+ nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
+ for (irq = 0; irq < nirqs; irq++) {
+ struct irq_data *data;
+ int virq;
+
+ virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
+ if (virq == 0)
+ continue;
+
+ data = irq_get_irq_data(virq);
+
+ if (!is_percpu_irq(irq)) {
+ /* Non per-CPU interrupts */
+ writel(irq, per_cpu_int_base +
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ if (!irqd_irq_disabled(data))
+ armada_370_xp_irq_unmask(data);
+ } else {
+ /* Per-CPU interrupts */
+ writel(irq, main_int_base +
+ ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+
+ /*
+			 * Re-enable on the current CPU;
+ * armada_xp_mpic_reenable_percpu() will take
+ * care of secondary CPUs when they come up.
+ */
+ if (irq_percpu_is_enabled(virq))
+ armada_370_xp_irq_unmask(data);
+ }
+ }
+
+ /* Reconfigure doorbells for IPIs and MSIs */
+ writel(doorbell_mask_reg,
+ per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ if (doorbell_mask_reg & IPI_DOORBELL_MASK)
+ writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
+ writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
+ ipi_resume();
+}
+
+static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
+ .suspend = armada_370_xp_mpic_suspend,
+ .resume = armada_370_xp_mpic_resume,
+};
+
+static int __init armada_370_xp_mpic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource main_int_res, per_cpu_int_res;
+ int nr_irqs, i;
+ u32 control;
+
+ BUG_ON(of_address_to_resource(node, 0, &main_int_res));
+ BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
+
+ BUG_ON(!request_mem_region(main_int_res.start,
+ resource_size(&main_int_res),
+ node->full_name));
+ BUG_ON(!request_mem_region(per_cpu_int_res.start,
+ resource_size(&per_cpu_int_res),
+ node->full_name));
+
+ main_int_base = ioremap(main_int_res.start,
+ resource_size(&main_int_res));
+ BUG_ON(!main_int_base);
+
+ per_cpu_int_base = ioremap(per_cpu_int_res.start,
+ resource_size(&per_cpu_int_res));
+ BUG_ON(!per_cpu_int_base);
+
+ control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+ nr_irqs = (control >> 2) & 0x3ff;
+
+ for (i = 0; i < nr_irqs; i++)
+ writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+
+ armada_370_xp_mpic_domain =
+ irq_domain_add_linear(node, nr_irqs,
+ &armada_370_xp_mpic_irq_ops, NULL);
+ BUG_ON(!armada_370_xp_mpic_domain);
+ irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
+
+ /* Setup for the boot CPU */
+ armada_xp_mpic_perf_init();
+ armada_xp_mpic_smp_cpu_init();
+
+ armada_370_xp_msi_init(node, main_int_res.start);
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (parent_irq <= 0) {
+ irq_set_default_host(armada_370_xp_mpic_domain);
+ set_handle_irq(armada_370_xp_handle_irq);
+#ifdef CONFIG_SMP
+ armada_xp_ipi_init(node);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+ "irqchip/armada/ipi:starting",
+ armada_xp_mpic_starting_cpu, NULL);
+#endif
+ } else {
+#ifdef CONFIG_SMP
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+ "irqchip/armada/cascade:starting",
+ mpic_cascaded_starting_cpu, NULL);
+#endif
+ irq_set_chained_handler(parent_irq,
+ armada_370_xp_mpic_handle_cascade_irq);
+ }
+
+ register_syscore_ops(&armada_370_xp_mpic_syscore_ops);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
new file mode 100644
index 000000000..9c9fc3e29
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Aspeed 24XX/25XX I2C Interrupt Controller.
+ *
+ * Copyright (C) 2012-2017 ASPEED Technology Inc.
+ * Copyright 2017 IBM Corporation
+ * Copyright 2017 Google, Inc.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+
+
+#define ASPEED_I2C_IC_NUM_BUS 14
+
+struct aspeed_i2c_ic {
+ void __iomem *base;
+ int parent_irq;
+ struct irq_domain *irq_domain;
+};
+
+/*
+ * The aspeed chip provides a single hardware interrupt for all of the I2C
+ * busses, so we use a dummy interrupt chip to translate this single interrupt
+ * into multiple interrupts, each associated with a single I2C bus.
+ */
+static void aspeed_i2c_ic_irq_handler(struct irq_desc *desc)
+{
+ struct aspeed_i2c_ic *i2c_ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long bit, status;
+
+ chained_irq_enter(chip, desc);
+ status = readl(i2c_ic->base);
+ for_each_set_bit(bit, &status, ASPEED_I2C_IC_NUM_BUS)
+ generic_handle_domain_irq(i2c_ic->irq_domain, bit);
+
+ chained_irq_exit(chip, desc);
+}
+
+/*
+ * Set simple handler and mark IRQ as valid. Nothing interesting to do here
+ * since we are using a dummy interrupt chip.
+ */
+static int aspeed_i2c_ic_map_irq_domain(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops aspeed_i2c_ic_irq_domain_ops = {
+ .map = aspeed_i2c_ic_map_irq_domain,
+};
+
+static int __init aspeed_i2c_ic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_i2c_ic *i2c_ic;
+ int ret = 0;
+
+ i2c_ic = kzalloc(sizeof(*i2c_ic), GFP_KERNEL);
+ if (!i2c_ic)
+ return -ENOMEM;
+
+ i2c_ic->base = of_iomap(node, 0);
+ if (!i2c_ic->base) {
+ ret = -ENOMEM;
+ goto err_free_ic;
+ }
+
+ i2c_ic->parent_irq = irq_of_parse_and_map(node, 0);
+ if (!i2c_ic->parent_irq) {
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+
+ i2c_ic->irq_domain = irq_domain_add_linear(node, ASPEED_I2C_IC_NUM_BUS,
+ &aspeed_i2c_ic_irq_domain_ops,
+ NULL);
+ if (!i2c_ic->irq_domain) {
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+
+ i2c_ic->irq_domain->name = "aspeed-i2c-domain";
+
+ irq_set_chained_handler_and_data(i2c_ic->parent_irq,
+ aspeed_i2c_ic_irq_handler, i2c_ic);
+
+ pr_info("i2c controller registered, irq %d\n", i2c_ic->parent_irq);
+
+ return 0;
+
+err_iounmap:
+ iounmap(i2c_ic->base);
+err_free_ic:
+ kfree(i2c_ic);
+ return ret;
+}
+
+IRQCHIP_DECLARE(ast2400_i2c_ic, "aspeed,ast2400-i2c-ic", aspeed_i2c_ic_of_init);
+IRQCHIP_DECLARE(ast2500_i2c_ic, "aspeed,ast2500-i2c-ic", aspeed_i2c_ic_of_init);
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
new file mode 100644
index 000000000..279e92cf0
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller
+ * Copyright 2019 IBM Corporation
+ *
+ * Eddie James <eajames@linux.ibm.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#define ASPEED_SCU_IC_REG 0x018
+#define ASPEED_SCU_IC_SHIFT 0
+#define ASPEED_SCU_IC_ENABLE GENMASK(6, ASPEED_SCU_IC_SHIFT)
+#define ASPEED_SCU_IC_NUM_IRQS 7
+#define ASPEED_SCU_IC_STATUS_SHIFT 16
+
+#define ASPEED_AST2600_SCU_IC0_REG 0x560
+#define ASPEED_AST2600_SCU_IC0_SHIFT 0
+#define ASPEED_AST2600_SCU_IC0_ENABLE \
+ GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT)
+#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6
+
+#define ASPEED_AST2600_SCU_IC1_REG 0x570
+#define ASPEED_AST2600_SCU_IC1_SHIFT 4
+#define ASPEED_AST2600_SCU_IC1_ENABLE \
+ GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT)
+#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2
+
+struct aspeed_scu_ic {
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
+ unsigned int reg;
+ struct regmap *scu;
+ struct irq_domain *irq_domain;
+};
+
+static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+{
+ unsigned int sts;
+ unsigned long bit;
+ unsigned long enabled;
+ unsigned long max;
+ unsigned long status;
+ struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
+
+ chained_irq_enter(chip, desc);
+
+ /*
+ * The SCU IC has just one register to control its operation and read
+ * status. The interrupt enable bits occupy the lower 16 bits of the
+ * register, while the interrupt status bits occupy the upper 16 bits.
+ * The status bit for a given interrupt is always 16 bits shifted from
+ * the enable bit for the same interrupt.
+ * Therefore, perform the IRQ operations in the enable bit space by
+ * shifting the status down to get the mapping and then back up to
+ * clear the bit.
+ */
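+	/* e.g. with irq_shift 0, hwirq 3 uses enable bit 3 and status bit 19 */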
+ regmap_read(scu_ic->scu, scu_ic->reg, &sts);
+ enabled = sts & scu_ic->irq_enable;
+ status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
+
+ bit = scu_ic->irq_shift;
+ max = scu_ic->num_irqs + bit;
+
+ for_each_set_bit_from(bit, &status, max) {
+ generic_handle_domain_irq(scu_ic->irq_domain,
+ bit - scu_ic->irq_shift);
+
+ regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
+ BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aspeed_scu_ic_irq_mask(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) |
+ (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+
+ /*
+	 * Status bits are cleared by writing 1. To prevent the mask operation
+	 * from clearing the status bits, include them in the update mask but
+	 * write them as 0.
+ */
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
+}
+
+static void aspeed_scu_ic_irq_unmask(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+ unsigned int mask = bit |
+ (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+
+ /*
+	 * Status bits are cleared by writing 1. To prevent the unmask operation
+	 * from clearing the status bits, include them in the update mask but
+	 * write them as 0.
+ */
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
+}
+
+static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip aspeed_scu_ic_chip = {
+ .name = "aspeed-scu-ic",
+ .irq_mask = aspeed_scu_ic_irq_mask,
+ .irq_unmask = aspeed_scu_ic_irq_unmask,
+ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
+};
+
+static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops aspeed_scu_ic_domain_ops = {
+ .map = aspeed_scu_ic_map,
+};
+
+static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
+ struct device_node *node)
+{
+ int irq;
+ int rc = 0;
+
+ if (!node->parent) {
+ rc = -ENODEV;
+ goto err;
+ }
+
+ scu_ic->scu = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(scu_ic->scu)) {
+ rc = PTR_ERR(scu_ic->scu);
+ goto err;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs,
+ &aspeed_scu_ic_domain_ops,
+ scu_ic);
+ if (!scu_ic->irq_domain) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler,
+ scu_ic);
+
+ return 0;
+
+err:
+ kfree(scu_ic);
+
+ return rc;
+}
+
+static int __init aspeed_scu_ic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE;
+ scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT;
+ scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS;
+ scu_ic->reg = ASPEED_SCU_IC_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE;
+ scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT;
+ scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS;
+ scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE;
+ scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT;
+ scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS;
+ scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0",
+ aspeed_ast2600_scu_ic0_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1",
+ aspeed_ast2600_scu_ic1_of_init);
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
new file mode 100644
index 000000000..62ccf2c0c
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 - Ben Herrenschmidt, IBM Corp.
+ *
+ * Driver for Aspeed "new" VIC as found in SoC generation 3 and later
+ *
+ * Based on irq-vic.c:
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <asm/exception.h>
+#include <asm/irq.h>
+
+/* These definitions correspond to the "new mapping" of the
+ * register set that interleaves "high" and "low". The offsets
+ * below are for the "low" register; add 4 to get to the high one.
+ */
+#define AVIC_IRQ_STATUS 0x00
+#define AVIC_FIQ_STATUS 0x08
+#define AVIC_RAW_STATUS 0x10
+#define AVIC_INT_SELECT 0x18
+#define AVIC_INT_ENABLE 0x20
+#define AVIC_INT_ENABLE_CLR 0x28
+#define AVIC_INT_TRIGGER 0x30
+#define AVIC_INT_TRIGGER_CLR 0x38
+#define AVIC_INT_SENSE 0x40
+#define AVIC_INT_DUAL_EDGE 0x48
+#define AVIC_INT_EVENT 0x50
+#define AVIC_EDGE_CLR 0x58
+#define AVIC_EDGE_STATUS 0x60
+
+#define NUM_IRQS 64
+
+struct aspeed_vic {
+ void __iomem *base;
+ u32 edge_sources[2];
+ struct irq_domain *dom;
+};
+static struct aspeed_vic *system_avic;
+
+static void vic_init_hw(struct aspeed_vic *vic)
+{
+ u32 sense;
+
+ /* Disable all interrupts */
+ writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR);
+ writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR + 4);
+
+ /* Make sure no soft trigger is on */
+ writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR);
+ writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR + 4);
+
+ /* Set everything to be IRQ */
+ writel(0, vic->base + AVIC_INT_SELECT);
+ writel(0, vic->base + AVIC_INT_SELECT + 4);
+
+ /* Some interrupts have a programmable high/low level trigger
+	 * (4 GPIO direct inputs); for now we assume this was configured
+	 * by firmware. Read back which ones are edge-triggered.
+ */
+ sense = readl(vic->base + AVIC_INT_SENSE);
+ vic->edge_sources[0] = ~sense;
+ sense = readl(vic->base + AVIC_INT_SENSE + 4);
+ vic->edge_sources[1] = ~sense;
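+	/* A cleared sense bit marks an edge-triggered source, hence the inversion */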
+
+ /* Clear edge detection latches */
+ writel(0xffffffff, vic->base + AVIC_EDGE_CLR);
+ writel(0xffffffff, vic->base + AVIC_EDGE_CLR + 4);
+}
+
+static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
+{
+ struct aspeed_vic *vic = system_avic;
+ u32 stat, irq;
+
+ for (;;) {
+ irq = 0;
+ stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS);
+ if (!stat) {
+ stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS + 4);
+ irq = 32;
+ }
+ if (stat == 0)
+ break;
+ irq += ffs(stat) - 1;
+ generic_handle_domain_irq(vic->dom, irq);
+ }
+}
+
+static void avic_ack_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ /* Clear edge latch for edge interrupts, nop for level */
+ if (vic->edge_sources[sidx] & sbit)
+ writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4);
+}
+
+static void avic_mask_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4);
+}
+
+static void avic_unmask_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ writel(sbit, vic->base + AVIC_INT_ENABLE + sidx * 4);
+}
+
+/* For level irq, faster than going through a nop "ack" and mask */
+static void avic_mask_ack_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ /* First mask */
+ writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4);
+
+ /* Then clear edge latch for edge interrupts */
+ if (vic->edge_sources[sidx] & sbit)
+ writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4);
+}
+
+static struct irq_chip avic_chip = {
+ .name = "AVIC",
+ .irq_ack = avic_ack_irq,
+ .irq_mask = avic_mask_irq,
+ .irq_unmask = avic_unmask_irq,
+ .irq_mask_ack = avic_mask_ack_irq,
+};
+
+static int avic_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct aspeed_vic *vic = d->host_data;
+ unsigned int sidx = hwirq >> 5;
+ unsigned int sbit = 1u << (hwirq & 0x1f);
+
+ /* Check if interrupt exists */
+ if (sidx > 1)
+ return -EPERM;
+
+ if (vic->edge_sources[sidx] & sbit)
+ irq_set_chip_and_handler(irq, &avic_chip, handle_edge_irq);
+ else
+ irq_set_chip_and_handler(irq, &avic_chip, handle_level_irq);
+ irq_set_chip_data(irq, vic);
+ irq_set_probe(irq);
+ return 0;
+}
+
+static const struct irq_domain_ops avic_dom_ops = {
+ .map = avic_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init avic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ void __iomem *regs;
+ struct aspeed_vic *vic;
+
+ if (WARN(parent, "non-root Aspeed VIC not supported"))
+ return -EINVAL;
+ if (WARN(system_avic, "duplicate Aspeed VIC not supported"))
+ return -EINVAL;
+
+ regs = of_iomap(node, 0);
+ if (WARN_ON(!regs))
+ return -EIO;
+
+ vic = kzalloc(sizeof(struct aspeed_vic), GFP_KERNEL);
+ if (WARN_ON(!vic)) {
+ iounmap(regs);
+ return -ENOMEM;
+ }
+ vic->base = regs;
+
+ /* Initialize sources, all masked */
+ vic_init_hw(vic);
+
+ /* Ready to receive interrupts */
+ system_avic = vic;
+ set_handle_irq(avic_handle_irq);
+
+ /* Register our domain */
+ vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0,
+ &avic_dom_ops, vic);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(ast2400_vic, "aspeed,ast2400-vic", avic_of_init);
+IRQCHIP_DECLARE(ast2500_vic, "aspeed,ast2500-vic", avic_of_init);
diff --git a/drivers/irqchip/irq-ath79-cpu.c b/drivers/irqchip/irq-ath79-cpu.c
new file mode 100644
index 000000000..923e4bba3
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-cpu.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Atheros AR71xx/AR724x/AR913x specific interrupt handling
+ *
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mach-ath79/ath79.h>
+
+/*
+ * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
+ * these devices typically allocate coherent DMA memory, however the
+ * DMA controller may still have some unsynchronized data in the FIFO.
+ * Issue a flush in the handlers to ensure that the driver sees
+ * the update.
+ *
+ * This array maps the interrupt lines to the DDR write buffer channels.
+ */
+
+static unsigned irq_wb_chan[8] = {
+ -1, -1, -1, -1, -1, -1, -1, -1,
+};
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long pending;
+ int irq;
+
+ pending = read_c0_status() & read_c0_cause() & ST0_IM;
+
+ if (!pending) {
+ spurious_interrupt();
+ return;
+ }
+
+ pending >>= CAUSEB_IP;
+ while (pending) {
+ irq = fls(pending) - 1;
+ if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
+ ath79_ddr_wb_flush(irq_wb_chan[irq]);
+ do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+ pending &= ~BIT(irq);
+ }
+}
+
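+/*
+ * "qca,ddr-wb-channel-interrupts" lists the CPU IRQ lines and
+ * "qca,ddr-wb-channels" the matching DDR write-buffer channel for each
+ * entry; both are walked index by index to populate irq_wb_chan[].
+ */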
+static int __init ar79_cpu_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ int err, i, count;
+
+ /* Fill the irq_wb_chan table */
+ count = of_count_phandle_with_args(
+ node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
+
+ for (i = 0; i < count; i++) {
+ struct of_phandle_args args;
+ u32 irq = i;
+
+ of_property_read_u32_index(
+ node, "qca,ddr-wb-channel-interrupts", i, &irq);
+ if (irq >= ARRAY_SIZE(irq_wb_chan))
+ continue;
+
+ err = of_parse_phandle_with_args(
+ node, "qca,ddr-wb-channels",
+ "#qca,ddr-wb-channel-cells",
+ i, &args);
+ if (err)
+ return err;
+
+ irq_wb_chan[irq] = args.args[0];
+ }
+
+ return mips_cpu_irq_of_init(node, parent);
+}
+IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
+ ar79_cpu_intc_of_init);
+
+void __init ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3)
+{
+ irq_wb_chan[2] = irq_wb_chan2;
+ irq_wb_chan[3] = irq_wb_chan3;
+ mips_cpu_irq_init();
+}
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
new file mode 100644
index 000000000..92f001a5f
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Atheros AR71xx/AR724x/AR913x MISC interrupt controller
+ *
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ */
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define AR71XX_RESET_REG_MISC_INT_STATUS 0
+#define AR71XX_RESET_REG_MISC_INT_ENABLE 4
+
+#define ATH79_MISC_IRQ_COUNT 32
+#define ATH79_MISC_PERF_IRQ 5
+
+static int ath79_perfcount_irq;
+
+int get_c0_perfcount_int(void)
+{
+ return ath79_perfcount_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+static void ath79_misc_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ void __iomem *base = domain->host_data;
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+
+ pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+ if (!pending) {
+ spurious_interrupt();
+ chained_irq_exit(chip, desc);
+ return;
+ }
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_domain_irq(domain, bit);
+ pending &= ~BIT(bit);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void ar71xx_misc_irq_unmask(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(t | BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar71xx_misc_irq_mask(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar724x_misc_irq_ack(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+ __raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+}
+
+static struct irq_chip ath79_misc_irq_chip = {
+ .name = "MISC",
+ .irq_unmask = ar71xx_misc_irq_unmask,
+ .irq_mask = ar71xx_misc_irq_mask,
+};
+
+static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static const struct irq_domain_ops misc_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = misc_map,
+};
+
+static void __init ath79_misc_intc_domain_init(
+ struct irq_domain *domain, int irq)
+{
+ void __iomem *base = domain->host_data;
+
+ ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
+
+ /* Disable and clear all interrupts */
+ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+ irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain);
+}
+
+static int __init ath79_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ void __iomem *base;
+ int irq;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ pr_err("Failed to get MISC IRQ\n");
+ return -EINVAL;
+ }
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("Failed to get MISC IRQ registers\n");
+ return -ENOMEM;
+ }
+
+ domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT,
+ &misc_irq_domain_ops, base);
+ if (!domain) {
+ pr_err("Failed to add MISC irqdomain\n");
+ return -EINVAL;
+ }
+
+ ath79_misc_intc_domain_init(domain, irq);
+ return 0;
+}
+
+static int __init ar7100_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+ return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
+ ar7100_misc_intc_of_init);
+
+static int __init ar7240_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+ return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
+ ar7240_misc_intc_of_init);
+
+void __init ath79_misc_irq_init(void __iomem *regs, int irq,
+ int irq_base, bool is_ar71xx)
+{
+ struct irq_domain *domain;
+
+ if (is_ar71xx)
+ ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+ else
+ ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+
+ domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT,
+ irq_base, 0, &misc_irq_domain_ops, regs);
+ if (!domain)
+ panic("Failed to create MISC irqdomain");
+
+ ath79_misc_intc_domain_init(domain, irq);
+}
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
new file mode 100644
index 000000000..072bd227b
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -0,0 +1,275 @@
+/*
+ * Atmel AT91 common AIC (Advanced Interrupt Controller) code shared by
+ * irq-atmel-aic and irq-atmel-aic5 drivers
+ *
+ * Copyright (C) 2004 SAN People
+ * Copyright (C) 2004 ATMEL
+ * Copyright (C) Rick Bronson
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "irq-atmel-aic-common.h"
+
+#define AT91_AIC_PRIOR GENMASK(2, 0)
+#define AT91_AIC_IRQ_MIN_PRIORITY 0
+#define AT91_AIC_IRQ_MAX_PRIORITY 7
+
+#define AT91_AIC_SRCTYPE GENMASK(6, 5)
+#define AT91_AIC_SRCTYPE_LOW (0 << 5)
+#define AT91_AIC_SRCTYPE_FALLING (1 << 5)
+#define AT91_AIC_SRCTYPE_HIGH (2 << 5)
+#define AT91_AIC_SRCTYPE_RISING (3 << 5)
+
+struct aic_chip_data {
+ u32 ext_irqs;
+};
+
+static void aic_common_shutdown(struct irq_data *d)
+{
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
+ ct->chip.irq_mask(d);
+}
+
+int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct aic_chip_data *aic = gc->private;
+ unsigned aic_type;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ aic_type = AT91_AIC_SRCTYPE_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ aic_type = AT91_AIC_SRCTYPE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ if (!(d->mask & aic->ext_irqs))
+ return -EINVAL;
+
+ aic_type = AT91_AIC_SRCTYPE_LOW;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ if (!(d->mask & aic->ext_irqs))
+ return -EINVAL;
+
+ aic_type = AT91_AIC_SRCTYPE_FALLING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val &= ~AT91_AIC_SRCTYPE;
+ *val |= aic_type;
+
+ return 0;
+}
+
+void aic_common_set_priority(int priority, unsigned *val)
+{
+ *val &= ~AT91_AIC_PRIOR;
+ *val |= priority;
+}
+
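+/*
+ * The AIC binding uses a three-cell interrupt specifier: the hwirq number,
+ * the trigger type and the priority (0 to 7). The priority is only
+ * range-checked here; the per-driver xlate callbacks program it into the
+ * Source Mode Register.
+ */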
+int aic_common_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec,
+ unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 3))
+ return -EINVAL;
+
+ if (WARN_ON((intspec[2] < AT91_AIC_IRQ_MIN_PRIORITY) ||
+ (intspec[2] > AT91_AIC_IRQ_MAX_PRIORITY)))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
+{
+ struct device_node *node = irq_domain_get_of_node(domain);
+ struct irq_chip_generic *gc;
+ struct aic_chip_data *aic;
+ struct property *prop;
+ const __be32 *p;
+ u32 hwirq;
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+
+ aic = gc->private;
+ aic->ext_irqs |= 1;
+
+ of_property_for_each_u32(node, "atmel,external-irqs", prop, p, hwirq) {
+ gc = irq_get_domain_generic_chip(domain, hwirq);
+ if (!gc) {
+			pr_warn("AIC: external irq %d >= %d, skipping it\n",
+ hwirq, domain->revmap_size);
+ continue;
+ }
+
+ aic = gc->private;
+ aic->ext_irqs |= (1 << (hwirq % 32));
+ }
+}
+
+#define AT91_RTC_IDR 0x24
+#define AT91_RTC_IMR 0x28
+#define AT91_RTC_IRQ_MASK 0x1f
+
+void __init aic_common_rtc_irq_fixup(void)
+{
+ struct device_node *np;
+ void __iomem *regs;
+
+ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "atmel,at91sam9x5-rtc");
+
+ if (!np)
+ return;
+
+ regs = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!regs)
+ return;
+
+ writel(AT91_RTC_IRQ_MASK, regs + AT91_RTC_IDR);
+
+ iounmap(regs);
+}
+
+#define AT91_RTT_MR 0x00 /* Real-time Mode Register */
+#define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */
+#define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */
+
+void __init aic_common_rtt_irq_fixup(void)
+{
+ struct device_node *np;
+ void __iomem *regs;
+
+ /*
+ * The at91sam9263 SoC has 2 instances of the RTT block, hence we
+ * iterate over the DT to find each occurrence.
+ */
+ for_each_compatible_node(np, NULL, "atmel,at91sam9260-rtt") {
+ regs = of_iomap(np, 0);
+ if (!regs)
+ continue;
+
+ writel(readl(regs + AT91_RTT_MR) &
+ ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN),
+ regs + AT91_RTT_MR);
+
+ iounmap(regs);
+ }
+}
+
+static void __init aic_common_irq_fixup(const struct of_device_id *matches)
+{
+ struct device_node *root = of_find_node_by_path("/");
+ const struct of_device_id *match;
+
+ if (!root)
+ return;
+
+ match = of_match_node(matches, root);
+
+ if (match) {
+ void (*fixup)(void) = match->data;
+ fixup();
+ }
+
+ of_node_put(root);
+}
+
+struct irq_domain *__init aic_common_of_init(struct device_node *node,
+ const struct irq_domain_ops *ops,
+ const char *name, int nirqs,
+ const struct of_device_id *matches)
+{
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+ struct aic_chip_data *aic;
+ void __iomem *reg_base;
+ int nchips;
+ int ret;
+ int i;
+
+ nchips = DIV_ROUND_UP(nirqs, 32);
+
+ reg_base = of_iomap(node, 0);
+ if (!reg_base)
+ return ERR_PTR(-ENOMEM);
+
+ aic = kcalloc(nchips, sizeof(*aic), GFP_KERNEL);
+ if (!aic) {
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+
+ domain = irq_domain_add_linear(node, nchips * 32, ops, aic);
+ if (!domain) {
+ ret = -ENOMEM;
+ goto err_free_aic;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
+ handle_fasteoi_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE |
+ IRQ_NOAUTOEN, 0, 0);
+ if (ret)
+ goto err_domain_remove;
+
+ for (i = 0; i < nchips; i++) {
+ gc = irq_get_domain_generic_chip(domain, i * 32);
+
+ gc->reg_base = reg_base;
+
+ gc->unused = 0;
+ gc->wake_enabled = ~0;
+ gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK;
+ gc->chip_types[0].chip.irq_eoi = irq_gc_eoi;
+ gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+ gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown;
+ gc->private = &aic[i];
+ }
+
+ aic_common_ext_irq_of_init(domain);
+ aic_common_irq_fixup(matches);
+
+ return domain;
+
+err_domain_remove:
+ irq_domain_remove(domain);
+
+err_free_aic:
+ kfree(aic);
+
+err_iounmap:
+ iounmap(reg_base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
new file mode 100644
index 000000000..242e62c18
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -0,0 +1,40 @@
+/*
+ * Atmel AT91 common AIC (Advanced Interrupt Controller) header file
+ *
+ * Copyright (C) 2004 SAN People
+ * Copyright (C) 2004 ATMEL
+ * Copyright (C) Rick Bronson
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __IRQ_ATMEL_AIC_COMMON_H
+#define __IRQ_ATMEL_AIC_COMMON_H
+
+
+int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val);
+
+void aic_common_set_priority(int priority, unsigned *val);
+
+int aic_common_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec,
+ unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type);
+
+struct irq_domain *__init aic_common_of_init(struct device_node *node,
+ const struct irq_domain_ops *ops,
+ const char *name, int nirqs,
+ const struct of_device_id *matches);
+
+void __init aic_common_rtc_irq_fixup(void);
+
+void __init aic_common_rtt_irq_fixup(void);
+
+#endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
new file mode 100644
index 000000000..4631f6847
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -0,0 +1,274 @@
+/*
+ * Atmel AT91 AIC (Advanced Interrupt Controller) driver
+ *
+ * Copyright (C) 2004 SAN People
+ * Copyright (C) 2004 ATMEL
+ * Copyright (C) Rick Bronson
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irq-atmel-aic-common.h"
+
+/* Number of irq lines managed by AIC */
+#define NR_AIC_IRQS 32
+
+#define AT91_AIC_SMR(n) ((n) * 4)
+
+#define AT91_AIC_SVR(n) (0x80 + ((n) * 4))
+#define AT91_AIC_IVR 0x100
+#define AT91_AIC_FVR 0x104
+#define AT91_AIC_ISR 0x108
+
+#define AT91_AIC_IPR 0x10c
+#define AT91_AIC_IMR 0x110
+#define AT91_AIC_CISR 0x114
+
+#define AT91_AIC_IECR 0x120
+#define AT91_AIC_IDCR 0x124
+#define AT91_AIC_ICCR 0x128
+#define AT91_AIC_ISCR 0x12c
+#define AT91_AIC_EOICR 0x130
+#define AT91_AIC_SPU 0x134
+#define AT91_AIC_DCR 0x138
+
+static struct irq_domain *aic_domain;
+
+static asmlinkage void __exception_irq_entry
+aic_handle(struct pt_regs *regs)
+{
+ struct irq_domain_chip_generic *dgc = aic_domain->gc;
+ struct irq_chip_generic *gc = dgc->gc[0];
+ u32 irqnr;
+ u32 irqstat;
+
+ irqnr = irq_reg_readl(gc, AT91_AIC_IVR);
+ irqstat = irq_reg_readl(gc, AT91_AIC_ISR);
+
+ if (!irqstat)
+ irq_reg_writel(gc, 0, AT91_AIC_EOICR);
+ else
+ generic_handle_domain_irq(aic_domain, irqnr);
+}
+
+static int aic_retrigger(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+	/* Retrigger the interrupt by setting it pending in the AIC */
+ irq_gc_lock(gc);
+ irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
+ irq_gc_unlock(gc);
+
+ return 1;
+}
+
+static int aic_set_type(struct irq_data *d, unsigned type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ unsigned int smr;
+ int ret;
+
+ smr = irq_reg_readl(gc, AT91_AIC_SMR(d->hwirq));
+ ret = aic_common_set_type(d, type, &smr);
+ if (ret)
+ return ret;
+
+ irq_reg_writel(gc, smr, AT91_AIC_SMR(d->hwirq));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void aic_suspend(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+ irq_gc_lock(gc);
+ irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR);
+ irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR);
+ irq_gc_unlock(gc);
+}
+
+static void aic_resume(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+ irq_gc_lock(gc);
+ irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR);
+ irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR);
+ irq_gc_unlock(gc);
+}
+
+static void aic_pm_shutdown(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+ irq_gc_lock(gc);
+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
+ irq_gc_unlock(gc);
+}
+#else
+#define aic_suspend NULL
+#define aic_resume NULL
+#define aic_pm_shutdown NULL
+#endif /* CONFIG_PM */
+
+static void __init aic_hw_init(struct irq_domain *domain)
+{
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
+ int i;
+
+ /*
+	 * Perform 8 End Of Interrupt Commands to make sure the AIC
+	 * will not lock out nIRQ
+ */
+ for (i = 0; i < 8; i++)
+ irq_reg_writel(gc, 0, AT91_AIC_EOICR);
+
+ /*
+ * Spurious Interrupt ID in Spurious Vector Register.
+ * When there is no current interrupt, the IRQ Vector Register
+ * reads the value stored in AIC_SPU
+ */
+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_SPU);
+
+ /* No debugging in AIC: Debug (Protect) Control Register */
+ irq_reg_writel(gc, 0, AT91_AIC_DCR);
+
+ /* Disable and clear all interrupts initially */
+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
+
+ for (i = 0; i < 32; i++)
+ irq_reg_writel(gc, i, AT91_AIC_SVR(i));
+}
+
+static int aic_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ struct irq_domain_chip_generic *dgc = d->gc;
+ struct irq_chip_generic *gc;
+ unsigned long flags;
+ unsigned smr;
+ int idx;
+ int ret;
+
+ if (!dgc)
+ return -EINVAL;
+
+ ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
+ out_hwirq, out_type);
+ if (ret)
+ return ret;
+
+ idx = intspec[0] / dgc->irqs_per_chip;
+ if (idx >= dgc->num_chips)
+ return -EINVAL;
+
+ gc = dgc->gc[idx];
+
+ irq_gc_lock_irqsave(gc, flags);
+ smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
+ aic_common_set_priority(intspec[2], &smr);
+ irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
+ irq_gc_unlock_irqrestore(gc, flags);
+
+ return ret;
+}
+
+static const struct irq_domain_ops aic_irq_ops = {
+ .map = irq_map_generic_chip,
+ .xlate = aic_irq_domain_xlate,
+};
+
+static void __init at91rm9200_aic_irq_fixup(void)
+{
+ aic_common_rtc_irq_fixup();
+}
+
+static void __init at91sam9260_aic_irq_fixup(void)
+{
+ aic_common_rtt_irq_fixup();
+}
+
+static void __init at91sam9g45_aic_irq_fixup(void)
+{
+ aic_common_rtc_irq_fixup();
+ aic_common_rtt_irq_fixup();
+}
+
+static const struct of_device_id aic_irq_fixups[] __initconst = {
+ { .compatible = "atmel,at91rm9200", .data = at91rm9200_aic_irq_fixup },
+ { .compatible = "atmel,at91sam9g45", .data = at91sam9g45_aic_irq_fixup },
+ { .compatible = "atmel,at91sam9n12", .data = at91rm9200_aic_irq_fixup },
+ { .compatible = "atmel,at91sam9rl", .data = at91sam9g45_aic_irq_fixup },
+ { .compatible = "atmel,at91sam9x5", .data = at91rm9200_aic_irq_fixup },
+ { .compatible = "atmel,at91sam9260", .data = at91sam9260_aic_irq_fixup },
+ { .compatible = "atmel,at91sam9261", .data = at91sam9260_aic_irq_fixup },
+ { .compatible = "atmel,at91sam9263", .data = at91sam9260_aic_irq_fixup },
+ { .compatible = "atmel,at91sam9g20", .data = at91sam9260_aic_irq_fixup },
+ { /* sentinel */ },
+};
+
+static int __init aic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+
+ if (aic_domain)
+ return -EEXIST;
+
+ domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
+ NR_AIC_IRQS, aic_irq_fixups);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
+
+ aic_domain = domain;
+ gc = irq_get_domain_generic_chip(domain, 0);
+
+ gc->chip_types[0].regs.eoi = AT91_AIC_EOICR;
+ gc->chip_types[0].regs.enable = AT91_AIC_IECR;
+ gc->chip_types[0].regs.disable = AT91_AIC_IDCR;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ gc->chip_types[0].chip.irq_retrigger = aic_retrigger;
+ gc->chip_types[0].chip.irq_set_type = aic_set_type;
+ gc->chip_types[0].chip.irq_suspend = aic_suspend;
+ gc->chip_types[0].chip.irq_resume = aic_resume;
+ gc->chip_types[0].chip.irq_pm_shutdown = aic_pm_shutdown;
+
+ aic_hw_init(domain);
+ set_handle_irq(aic_handle);
+
+ return 0;
+}
+IRQCHIP_DECLARE(at91rm9200_aic, "atmel,at91rm9200-aic", aic_of_init);
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
new file mode 100644
index 000000000..145535bd7
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -0,0 +1,408 @@
+/*
+ * Atmel AT91 AIC5 (Advanced Interrupt Controller) driver
+ *
+ * Copyright (C) 2004 SAN People
+ * Copyright (C) 2004 ATMEL
+ * Copyright (C) Rick Bronson
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irq-atmel-aic-common.h"
+
+/* Number of irq lines managed by AIC */
+#define NR_AIC5_IRQS 128
+
+#define AT91_AIC5_SSR 0x0
+#define AT91_AIC5_INTSEL_MSK (0x7f << 0)
+
+#define AT91_AIC5_SMR 0x4
+
+#define AT91_AIC5_SVR 0x8
+#define AT91_AIC5_IVR 0x10
+#define AT91_AIC5_FVR 0x14
+#define AT91_AIC5_ISR 0x18
+
+#define AT91_AIC5_IPR0 0x20
+#define AT91_AIC5_IPR1 0x24
+#define AT91_AIC5_IPR2 0x28
+#define AT91_AIC5_IPR3 0x2c
+#define AT91_AIC5_IMR 0x30
+#define AT91_AIC5_CISR 0x34
+
+#define AT91_AIC5_IECR 0x40
+#define AT91_AIC5_IDCR 0x44
+#define AT91_AIC5_ICCR 0x48
+#define AT91_AIC5_ISCR 0x4c
+#define AT91_AIC5_EOICR 0x38
+#define AT91_AIC5_SPU 0x3c
+#define AT91_AIC5_DCR 0x6c
+
+#define AT91_AIC5_FFER 0x50
+#define AT91_AIC5_FFDR 0x54
+#define AT91_AIC5_FFSR 0x58
+
+static struct irq_domain *aic5_domain;
+
+static asmlinkage void __exception_irq_entry
+aic5_handle(struct pt_regs *regs)
+{
+ struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0);
+ u32 irqnr;
+ u32 irqstat;
+
+ irqnr = irq_reg_readl(bgc, AT91_AIC5_IVR);
+ irqstat = irq_reg_readl(bgc, AT91_AIC5_ISR);
+
+ if (!irqstat)
+ irq_reg_writel(bgc, 0, AT91_AIC5_EOICR);
+ else
+ generic_handle_domain_irq(aic5_domain, irqnr);
+}
+
+static void aic5_mask(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+ /*
+ * Disable interrupt on AIC5. We always take the lock of the
+ * first irq chip as all chips share the same registers.
+ */
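+	/* The SSR write selects the source that the IDCR write then disables */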
+ irq_gc_lock(bgc);
+ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
+ irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
+ gc->mask_cache &= ~d->mask;
+ irq_gc_unlock(bgc);
+}
+
+static void aic5_unmask(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+ /*
+ * Enable interrupt on AIC5. We always take the lock of the
+ * first irq chip as all chips share the same registers.
+ */
+ irq_gc_lock(bgc);
+ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
+ irq_reg_writel(gc, 1, AT91_AIC5_IECR);
+ gc->mask_cache |= d->mask;
+ irq_gc_unlock(bgc);
+}
+
+static int aic5_retrigger(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+
+	/* Retrigger the interrupt by setting it pending in the AIC5 */
+ irq_gc_lock(bgc);
+ irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
+ irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
+ irq_gc_unlock(bgc);
+
+ return 1;
+}
+
+static int aic5_set_type(struct irq_data *d, unsigned type)
+{
+ struct irq_domain *domain = d->domain;
+ struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+ unsigned int smr;
+ int ret;
+
+ irq_gc_lock(bgc);
+ irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
+ smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
+ ret = aic_common_set_type(d, type, &smr);
+ if (!ret)
+ irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
+ irq_gc_unlock(bgc);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static u32 *smr_cache;
+
+static void aic5_suspend(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct irq_domain_chip_generic *dgc = domain->gc;
+ struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ int i;
+ u32 mask;
+
+ if (smr_cache)
+ for (i = 0; i < domain->revmap_size; i++) {
+ irq_reg_writel(bgc, i, AT91_AIC5_SSR);
+ smr_cache[i] = irq_reg_readl(bgc, AT91_AIC5_SMR);
+ }
+
+ irq_gc_lock(bgc);
+ for (i = 0; i < dgc->irqs_per_chip; i++) {
+ mask = 1 << i;
+ if ((mask & gc->mask_cache) == (mask & gc->wake_active))
+ continue;
+
+ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
+ if (mask & gc->wake_active)
+ irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
+ else
+ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
+ }
+ irq_gc_unlock(bgc);
+}
+
+static void aic5_resume(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct irq_domain_chip_generic *dgc = domain->gc;
+ struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ int i;
+ u32 mask;
+
+ irq_gc_lock(bgc);
+
+ if (smr_cache) {
+ irq_reg_writel(bgc, 0xffffffff, AT91_AIC5_SPU);
+ for (i = 0; i < domain->revmap_size; i++) {
+ irq_reg_writel(bgc, i, AT91_AIC5_SSR);
+ irq_reg_writel(bgc, i, AT91_AIC5_SVR);
+ irq_reg_writel(bgc, smr_cache[i], AT91_AIC5_SMR);
+ }
+ }
+
+ for (i = 0; i < dgc->irqs_per_chip; i++) {
+ mask = 1 << i;
+
+ if (!smr_cache &&
+ ((mask & gc->mask_cache) == (mask & gc->wake_active)))
+ continue;
+
+ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
+ if (mask & gc->mask_cache)
+ irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
+ else
+ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
+ }
+ irq_gc_unlock(bgc);
+}
+
+static void aic5_pm_shutdown(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct irq_domain_chip_generic *dgc = domain->gc;
+ struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ int i;
+
+ irq_gc_lock(bgc);
+ for (i = 0; i < dgc->irqs_per_chip; i++) {
+ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
+ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
+ irq_reg_writel(bgc, 1, AT91_AIC5_ICCR);
+ }
+ irq_gc_unlock(bgc);
+}
+#else
+#define aic5_suspend NULL
+#define aic5_resume NULL
+#define aic5_pm_shutdown NULL
+#endif /* CONFIG_PM */
+
+static void __init aic5_hw_init(struct irq_domain *domain)
+{
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
+ int i;
+
+ /*
+	 * Perform 8 End Of Interrupt Commands to make sure the AIC
+	 * will not lock out nIRQ
+ */
+ for (i = 0; i < 8; i++)
+ irq_reg_writel(gc, 0, AT91_AIC5_EOICR);
+
+ /*
+ * Spurious Interrupt ID in Spurious Vector Register.
+ * When there is no current interrupt, the IRQ Vector Register
+ * reads the value stored in AIC_SPU
+ */
+ irq_reg_writel(gc, 0xffffffff, AT91_AIC5_SPU);
+
+ /* No debugging in AIC: Debug (Protect) Control Register */
+ irq_reg_writel(gc, 0, AT91_AIC5_DCR);
+
+ /* Disable and clear all interrupts initially */
+ for (i = 0; i < domain->revmap_size; i++) {
+ irq_reg_writel(gc, i, AT91_AIC5_SSR);
+ irq_reg_writel(gc, i, AT91_AIC5_SVR);
+ irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
+ irq_reg_writel(gc, 1, AT91_AIC5_ICCR);
+ }
+}
+
+static int aic5_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
+ unsigned long flags;
+ unsigned smr;
+ int ret;
+
+ if (!bgc)
+ return -EINVAL;
+
+ ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
+ out_hwirq, out_type);
+ if (ret)
+ return ret;
+
+ irq_gc_lock_irqsave(bgc, flags);
+ irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
+ smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
+ aic_common_set_priority(intspec[2], &smr);
+ irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
+ irq_gc_unlock_irqrestore(bgc, flags);
+
+ return ret;
+}
+
+static const struct irq_domain_ops aic5_irq_ops = {
+ .map = irq_map_generic_chip,
+ .xlate = aic5_irq_domain_xlate,
+};
+
+static void __init sama5d3_aic_irq_fixup(void)
+{
+ aic_common_rtc_irq_fixup();
+}
+
+static void __init sam9x60_aic_irq_fixup(void)
+{
+ aic_common_rtc_irq_fixup();
+ aic_common_rtt_irq_fixup();
+}
+
+static const struct of_device_id aic5_irq_fixups[] __initconst = {
+ { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
+ { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
+ { .compatible = "microchip,sam9x60", .data = sam9x60_aic_irq_fixup },
+ { /* sentinel */ },
+};
+
+static int __init aic5_of_init(struct device_node *node,
+ struct device_node *parent,
+ int nirqs)
+{
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+ int nchips;
+ int i;
+
+ if (nirqs > NR_AIC5_IRQS)
+ return -EINVAL;
+
+ if (aic5_domain)
+ return -EEXIST;
+
+ domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
+ nirqs, aic5_irq_fixups);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
+
+ aic5_domain = domain;
+ nchips = aic5_domain->revmap_size / 32;
+ for (i = 0; i < nchips; i++) {
+ gc = irq_get_domain_generic_chip(domain, i * 32);
+
+ gc->chip_types[0].regs.eoi = AT91_AIC5_EOICR;
+ gc->chip_types[0].chip.irq_mask = aic5_mask;
+ gc->chip_types[0].chip.irq_unmask = aic5_unmask;
+ gc->chip_types[0].chip.irq_retrigger = aic5_retrigger;
+ gc->chip_types[0].chip.irq_set_type = aic5_set_type;
+ gc->chip_types[0].chip.irq_suspend = aic5_suspend;
+ gc->chip_types[0].chip.irq_resume = aic5_resume;
+ gc->chip_types[0].chip.irq_pm_shutdown = aic5_pm_shutdown;
+ }
+
+ aic5_hw_init(domain);
+ set_handle_irq(aic5_handle);
+
+ return 0;
+}
+
+#define NR_SAMA5D2_IRQS 77
+
+static int __init sama5d2_aic5_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+#ifdef CONFIG_PM
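+	/* One cache entry per interrupt, rounded up to a whole 32-interrupt bank */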
+ smr_cache = kcalloc(DIV_ROUND_UP(NR_SAMA5D2_IRQS, 32) * 32,
+ sizeof(*smr_cache), GFP_KERNEL);
+ if (!smr_cache)
+ return -ENOMEM;
+#endif
+
+ return aic5_of_init(node, parent, NR_SAMA5D2_IRQS);
+}
+IRQCHIP_DECLARE(sama5d2_aic5, "atmel,sama5d2-aic", sama5d2_aic5_of_init);
+
+#define NR_SAMA5D3_IRQS 48
+
+static int __init sama5d3_aic5_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return aic5_of_init(node, parent, NR_SAMA5D3_IRQS);
+}
+IRQCHIP_DECLARE(sama5d3_aic5, "atmel,sama5d3-aic", sama5d3_aic5_of_init);
+
+#define NR_SAMA5D4_IRQS 68
+
+static int __init sama5d4_aic5_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
+}
+IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
+
+#define NR_SAM9X60_IRQS 50
+
+static int __init sam9x60_aic5_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
+}
+IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
new file mode 100644
index 000000000..e94e28822
--- /dev/null
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2010 Broadcom
+ * Copyright 2012 Simon Arlott, Chris Boot, Stephen Warren
+ *
+ * Quirk 1: Shortcut interrupts don't set the bank 1/2 register pending bits
+ *
+ * If an interrupt fires on bank 1 that isn't in the shortcuts list, bit 8
+ * on bank 0 is set to signify that an interrupt in bank 1 has fired, and
+ * to look in the bank 1 status register for more information.
+ *
+ * If an interrupt fires on bank 1 that _is_ in the shortcuts list, its
+ * shortcut bit in bank 0 is set as well as its interrupt bit in the bank 1
+ * status register, but bank 0 bit 8 is _not_ set.
+ *
+ * Quirk 2: You can't mask the register 1/2 pending interrupts
+ *
+ * In a proper cascaded interrupt controller, the interrupt lines with
+ * cascaded interrupt controllers on them are just normal interrupt lines.
+ * You can mask the interrupts and get on with things. With this controller
+ * you can't do that.
+ *
+ * Quirk 3: The shortcut interrupts can't be (un)masked in bank 0
+ *
+ * Those interrupts that have shortcuts can only be masked/unmasked in
+ * their respective banks' enable/disable registers. Doing so in the bank 0
+ * enable/disable registers has no effect.
+ *
+ * The FIQ control register:
+ * Bits 0-6: IRQ (index in order of interrupts from banks 1, 2, then 0)
+ * Bit 7: Enable FIQ generation
+ * Bits 8+: Unused
+ *
+ * An interrupt must be disabled before configuring it for FIQ generation,
+ * otherwise both handlers will fire at the same time!
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+
+#include <asm/exception.h>
+
+/* Put the bank and irq (32 bits) into the hwirq */
+#define MAKE_HWIRQ(b, n) ((b << 5) | (n))
+#define HWIRQ_BANK(i) (i >> 5)
+#define HWIRQ_BIT(i) BIT(i & 0x1f)
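+/*
+ * For instance, MAKE_HWIRQ(1, 9) yields 0x29; HWIRQ_BANK(0x29) recovers
+ * bank 1 and HWIRQ_BIT(0x29) recovers BIT(9).
+ */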
+
+#define NR_IRQS_BANK0 8
+#define BANK0_HWIRQ_MASK 0xff
+/* Shortcuts can't be disabled so any unknown new ones need to be masked */
+#define SHORTCUT1_MASK 0x00007c00
+#define SHORTCUT2_MASK 0x001f8000
+#define SHORTCUT_SHIFT 10
+#define BANK1_HWIRQ BIT(8)
+#define BANK2_HWIRQ BIT(9)
+#define BANK0_VALID_MASK (BANK0_HWIRQ_MASK | BANK1_HWIRQ | BANK2_HWIRQ \
+ | SHORTCUT1_MASK | SHORTCUT2_MASK)
+
+#define REG_FIQ_CONTROL 0x0c
+#define FIQ_CONTROL_ENABLE BIT(7)
+
+#define NR_BANKS 3
+#define IRQS_PER_BANK 32
+
+static const int reg_pending[] __initconst = { 0x00, 0x04, 0x08 };
+static const int reg_enable[] __initconst = { 0x18, 0x10, 0x14 };
+static const int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 };
+static const int bank_irqs[] __initconst = { 8, 32, 32 };
+
+static const int shortcuts[] = {
+ 7, 9, 10, 18, 19, /* Bank 1 */
+ 21, 22, 23, 24, 25, 30 /* Bank 2 */
+};
+
+struct armctrl_ic {
+ void __iomem *base;
+ void __iomem *pending[NR_BANKS];
+ void __iomem *enable[NR_BANKS];
+ void __iomem *disable[NR_BANKS];
+ struct irq_domain *domain;
+};
+
+static struct armctrl_ic intc __read_mostly;
+static void __exception_irq_entry bcm2835_handle_irq(
+ struct pt_regs *regs);
+static void bcm2836_chained_handle_irq(struct irq_desc *desc);
+
+static void armctrl_mask_irq(struct irq_data *d)
+{
+ writel_relaxed(HWIRQ_BIT(d->hwirq), intc.disable[HWIRQ_BANK(d->hwirq)]);
+}
+
+static void armctrl_unmask_irq(struct irq_data *d)
+{
+ writel_relaxed(HWIRQ_BIT(d->hwirq), intc.enable[HWIRQ_BANK(d->hwirq)]);
+}
+
+static struct irq_chip armctrl_chip = {
+ .name = "ARMCTRL-level",
+ .irq_mask = armctrl_mask_irq,
+ .irq_unmask = armctrl_unmask_irq
+};
+
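+/* The DT interrupt specifier is two cells: the bank and the irq within it */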
+static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize != 2))
+ return -EINVAL;
+
+ if (WARN_ON(intspec[0] >= NR_BANKS))
+ return -EINVAL;
+
+ if (WARN_ON(intspec[1] >= IRQS_PER_BANK))
+ return -EINVAL;
+
+ if (WARN_ON(intspec[0] == 0 && intspec[1] >= NR_IRQS_BANK0))
+ return -EINVAL;
+
+ *out_hwirq = MAKE_HWIRQ(intspec[0], intspec[1]);
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+static const struct irq_domain_ops armctrl_ops = {
+ .xlate = armctrl_xlate
+};
+
+static int __init armctrl_of_init(struct device_node *node,
+ struct device_node *parent,
+ bool is_2836)
+{
+ void __iomem *base;
+ int irq, b, i;
+ u32 reg;
+
+ base = of_iomap(node, 0);
+ if (!base)
+ panic("%pOF: unable to map IC registers\n", node);
+
+ intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
+ &armctrl_ops, NULL);
+ if (!intc.domain)
+ panic("%pOF: unable to create IRQ domain\n", node);
+
+ for (b = 0; b < NR_BANKS; b++) {
+ intc.pending[b] = base + reg_pending[b];
+ intc.enable[b] = base + reg_enable[b];
+ intc.disable[b] = base + reg_disable[b];
+
+ for (i = 0; i < bank_irqs[b]; i++) {
+ irq = irq_create_mapping(intc.domain, MAKE_HWIRQ(b, i));
+ BUG_ON(irq <= 0);
+ irq_set_chip_and_handler(irq, &armctrl_chip,
+ handle_level_irq);
+ irq_set_probe(irq);
+ }
+
+ reg = readl_relaxed(intc.enable[b]);
+ if (reg) {
+ writel_relaxed(reg, intc.disable[b]);
+ pr_err(FW_BUG "Bootloader left irq enabled: "
+ "bank %d irq %*pbl\n", b, IRQS_PER_BANK, &reg);
+ }
+ }
+
+ reg = readl_relaxed(base + REG_FIQ_CONTROL);
+ if (reg & FIQ_CONTROL_ENABLE) {
+ writel_relaxed(0, base + REG_FIQ_CONTROL);
+ pr_err(FW_BUG "Bootloader left fiq enabled\n");
+ }
+
+ if (is_2836) {
+ int parent_irq = irq_of_parse_and_map(node, 0);
+
+ if (!parent_irq) {
+ panic("%pOF: unable to get parent interrupt.\n",
+ node);
+ }
+ irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
+ } else {
+ set_handle_irq(bcm2835_handle_irq);
+ }
+
+ return 0;
+}
+
+static int __init bcm2835_armctrl_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return armctrl_of_init(node, parent, false);
+}
+
+static int __init bcm2836_armctrl_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return armctrl_of_init(node, parent, true);
+}
+
+
+/*
+ * Handle each interrupt across the entire interrupt controller. This reads the
+ * status register before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
+ */
+
+static u32 armctrl_translate_bank(int bank)
+{
+ u32 stat = readl_relaxed(intc.pending[bank]);
+
+ return MAKE_HWIRQ(bank, ffs(stat) - 1);
+}
+
+static u32 armctrl_translate_shortcut(int bank, u32 stat)
+{
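+	/*
+	 * e.g. bank-0 shortcut bit 10 is the first shortcuts[] entry, so it
+	 * resolves to MAKE_HWIRQ(1, 7), i.e. bank 1 interrupt 7.
+	 */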
+ return MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);
+}
+
+static u32 get_next_armctrl_hwirq(void)
+{
+ u32 stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK;
+
+ if (stat == 0)
+ return ~0;
+ else if (stat & BANK0_HWIRQ_MASK)
+ return MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
+ else if (stat & SHORTCUT1_MASK)
+ return armctrl_translate_shortcut(1, stat & SHORTCUT1_MASK);
+ else if (stat & SHORTCUT2_MASK)
+ return armctrl_translate_shortcut(2, stat & SHORTCUT2_MASK);
+ else if (stat & BANK1_HWIRQ)
+ return armctrl_translate_bank(1);
+ else if (stat & BANK2_HWIRQ)
+ return armctrl_translate_bank(2);
+ else
+ BUG();
+}
+
+static void __exception_irq_entry bcm2835_handle_irq(
+ struct pt_regs *regs)
+{
+ u32 hwirq;
+
+ while ((hwirq = get_next_armctrl_hwirq()) != ~0)
+ generic_handle_domain_irq(intc.domain, hwirq);
+}
+
+static void bcm2836_chained_handle_irq(struct irq_desc *desc)
+{
+ u32 hwirq;
+
+ while ((hwirq = get_next_armctrl_hwirq()) != ~0)
+ generic_handle_domain_irq(intc.domain, hwirq);
+}
+
+IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic",
+ bcm2835_armctrl_of_init);
+IRQCHIP_DECLARE(bcm2836_armctrl_ic, "brcm,bcm2836-armctrl-ic",
+ bcm2836_armctrl_of_init);
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
new file mode 100644
index 000000000..51491c3c6
--- /dev/null
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Root interrupt controller for the BCM2836 (Raspberry Pi 2).
+ *
+ * Copyright 2015 Broadcom
+ */
+
+#include <linux/cpu.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-bcm2836.h>
+
+#include <asm/exception.h>
+
+struct bcm2836_arm_irqchip_intc {
+ struct irq_domain *domain;
+ void __iomem *base;
+};
+
+static struct bcm2836_arm_irqchip_intc intc __read_mostly;
+
+static void bcm2836_arm_irqchip_mask_per_cpu_irq(unsigned int reg_offset,
+ unsigned int bit,
+ int cpu)
+{
+ void __iomem *reg = intc.base + reg_offset + 4 * cpu;
+
+ writel(readl(reg) & ~BIT(bit), reg);
+}
+
+static void bcm2836_arm_irqchip_unmask_per_cpu_irq(unsigned int reg_offset,
+ unsigned int bit,
+ int cpu)
+{
+ void __iomem *reg = intc.base + reg_offset + 4 * cpu;
+
+ writel(readl(reg) | BIT(bit), reg);
+}
+
+static void bcm2836_arm_irqchip_mask_timer_irq(struct irq_data *d)
+{
+ bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0,
+ d->hwirq - LOCAL_IRQ_CNTPSIRQ,
+ smp_processor_id());
+}
+
+static void bcm2836_arm_irqchip_unmask_timer_irq(struct irq_data *d)
+{
+ bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0,
+ d->hwirq - LOCAL_IRQ_CNTPSIRQ,
+ smp_processor_id());
+}
+
+static struct irq_chip bcm2836_arm_irqchip_timer = {
+ .name = "bcm2836-timer",
+ .irq_mask = bcm2836_arm_irqchip_mask_timer_irq,
+ .irq_unmask = bcm2836_arm_irqchip_unmask_timer_irq,
+};
+
+static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d)
+{
+ writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_CLR);
+}
+
+static void bcm2836_arm_irqchip_unmask_pmu_irq(struct irq_data *d)
+{
+ writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_SET);
+}
+
+static struct irq_chip bcm2836_arm_irqchip_pmu = {
+ .name = "bcm2836-pmu",
+ .irq_mask = bcm2836_arm_irqchip_mask_pmu_irq,
+ .irq_unmask = bcm2836_arm_irqchip_unmask_pmu_irq,
+};
+
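+/*
+ * No masking is done for the GPU interrupt at this level, so these
+ * callbacks are intentionally empty.
+ */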
+static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d)
+{
+}
+
+static void bcm2836_arm_irqchip_unmask_gpu_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip bcm2836_arm_irqchip_gpu = {
+ .name = "bcm2836-gpu",
+ .irq_mask = bcm2836_arm_irqchip_mask_gpu_irq,
+ .irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
+};
+
+static void bcm2836_arm_irqchip_dummy_op(struct irq_data *d)
+{
+}
+
+static struct irq_chip bcm2836_arm_irqchip_dummy = {
+ .name = "bcm2836-dummy",
+ .irq_eoi = bcm2836_arm_irqchip_dummy_op,
+};
+
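+/*
+ * Map each local hwirq onto its dedicated irq_chip. All of these are
+ * per-CPU interrupts and start out disabled (IRQ_NOAUTOEN).
+ */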
+static int bcm2836_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip;
+
+ switch (hw) {
+ case LOCAL_IRQ_MAILBOX0:
+ chip = &bcm2836_arm_irqchip_dummy;
+ break;
+ case LOCAL_IRQ_CNTPSIRQ:
+ case LOCAL_IRQ_CNTPNSIRQ:
+ case LOCAL_IRQ_CNTHPIRQ:
+ case LOCAL_IRQ_CNTVIRQ:
+ chip = &bcm2836_arm_irqchip_timer;
+ break;
+ case LOCAL_IRQ_GPU_FAST:
+ chip = &bcm2836_arm_irqchip_gpu;
+ break;
+ case LOCAL_IRQ_PMU_FAST:
+ chip = &bcm2836_arm_irqchip_pmu;
+ break;
+ default:
+ pr_warn_once("Unexpected hw irq: %lu\n", hw);
+ return -EINVAL;
+ }
+
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ return 0;
+}
+
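+/*
+ * Top-level handler: each CPU reads its own pending word (the per-CPU
+ * registers are 4 bytes apart) and dispatches the lowest pending bit.
+ */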
+static void
+__exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ u32 stat;
+
+ stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu);
+ if (stat) {
+ u32 hwirq = ffs(stat) - 1;
+
+ generic_handle_domain_irq(intc.domain, hwirq);
+ }
+}
+
+#ifdef CONFIG_SMP
+static struct irq_domain *ipi_domain;
+
+static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int cpu = smp_processor_id();
+ u32 mbox_val;
+
+ chained_irq_enter(chip, desc);
+
+ mbox_val = readl_relaxed(intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu);
+ if (mbox_val) {
+ int hwirq = ffs(mbox_val) - 1;
+ generic_handle_domain_irq(ipi_domain, hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
+{
+ int cpu = smp_processor_id();
+
+ writel_relaxed(BIT(d->hwirq),
+ intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu);
+}
+
+static void bcm2836_arm_irqchip_ipi_send_mask(struct irq_data *d,
+ const struct cpumask *mask)
+{
+ int cpu;
+ void __iomem *mailbox0_base = intc.base + LOCAL_MAILBOX0_SET0;
+
+ /*
+ * Ensure that stores to normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ smp_wmb();
+
+ for_each_cpu(cpu, mask)
+ writel_relaxed(BIT(d->hwirq), mailbox0_base + 16 * cpu);
+}
+
+static struct irq_chip bcm2836_arm_irqchip_ipi = {
+ .name = "IPI",
+ .irq_mask = bcm2836_arm_irqchip_dummy_op,
+ .irq_unmask = bcm2836_arm_irqchip_dummy_op,
+ .irq_ack = bcm2836_arm_irqchip_ipi_ack,
+ .ipi_send_mask = bcm2836_arm_irqchip_ipi_send_mask,
+};
+
+static int bcm2836_arm_irqchip_ipi_alloc(struct irq_domain *d,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_set_percpu_devid(virq + i);
+ irq_domain_set_info(d, virq + i, i, &bcm2836_arm_irqchip_ipi,
+ d->host_data,
+ handle_percpu_devid_irq,
+ NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void bcm2836_arm_irqchip_ipi_free(struct irq_domain *d,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ /* Not freeing IPIs */
+}
+
+static const struct irq_domain_ops ipi_domain_ops = {
+ .alloc = bcm2836_arm_irqchip_ipi_alloc,
+ .free = bcm2836_arm_irqchip_ipi_free,
+};
+
+static int bcm2836_cpu_starting(unsigned int cpu)
+{
+ bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+ cpu);
+ return 0;
+}
+
+static int bcm2836_cpu_dying(unsigned int cpu)
+{
+ bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+ cpu);
+ return 0;
+}
+
+#define BITS_PER_MBOX 32
+
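+/*
+ * Mailbox 0 provides 32 bits per CPU; each bit is exposed as a separate
+ * IPI through a dedicated IPI domain chained behind the LOCAL_IRQ_MAILBOX0
+ * mux interrupt.
+ */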
+static void __init bcm2836_arm_irqchip_smp_init(void)
+{
+ struct irq_fwspec ipi_fwspec = {
+ .fwnode = intc.domain->fwnode,
+ .param_count = 1,
+ .param = {
+ [0] = LOCAL_IRQ_MAILBOX0,
+ },
+ };
+ int base_ipi, mux_irq;
+
+ mux_irq = irq_create_fwspec_mapping(&ipi_fwspec);
+ if (WARN_ON(mux_irq <= 0))
+ return;
+
+ ipi_domain = irq_domain_create_linear(intc.domain->fwnode,
+ BITS_PER_MBOX, &ipi_domain_ops,
+ NULL);
+ if (WARN_ON(!ipi_domain))
+ return;
+
+ ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
+ irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
+
+ base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, BITS_PER_MBOX,
+ NUMA_NO_NODE, NULL,
+ false, NULL);
+
+ if (WARN_ON(!base_ipi))
+ return;
+
+ set_smp_ipi_range(base_ipi, BITS_PER_MBOX);
+
+ irq_set_chained_handler_and_data(mux_irq,
+ bcm2836_arm_irqchip_handle_ipi, NULL);
+
+ /* Unmask IPIs to the boot CPU. */
+ cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
+ "irqchip/bcm2836:starting", bcm2836_cpu_starting,
+ bcm2836_cpu_dying);
+}
+#else
+#define bcm2836_arm_irqchip_smp_init() do { } while (0)
+#endif
+
+static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = bcm2836_map,
+};
+
+/*
+ * The LOCAL_IRQ_CNT* timer firings are based on the external
+ * oscillator with some scaling. The firmware sets up CNTFRQ to
+ * report 19.2MHz, but doesn't set up the scaling registers.
+ */
+static void bcm2835_init_local_timer_frequency(void)
+{
+	/*
+	 * Set the timer to source from the 19.2MHz crystal clock (bit
+	 * 8 unset), and only increment by 1 instead of 2 (bit 9
+	 * unset).
+	 */
+ writel(0, intc.base + LOCAL_CONTROL);
+
+	/*
+	 * Set the timer prescaler to 1:1 (timer freq = input freq *
+	 * 2**31 / prescaler). Writing 0x80000000 (2**31) below therefore
+	 * leaves the timer running at the 19.2MHz input frequency.
+	 */
+ writel(0x80000000, intc.base + LOCAL_PRESCALER);
+}
+
+static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ intc.base = of_iomap(node, 0);
+ if (!intc.base) {
+ panic("%pOF: unable to map local interrupt registers\n", node);
+ }
+
+ bcm2835_init_local_timer_frequency();
+
+ intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1,
+ &bcm2836_arm_irqchip_intc_ops,
+ NULL);
+ if (!intc.domain)
+ panic("%pOF: unable to create IRQ domain\n", node);
+
+ irq_domain_update_bus_token(intc.domain, DOMAIN_BUS_WIRED);
+
+ bcm2836_arm_irqchip_smp_init();
+
+ set_handle_irq(bcm2836_arm_irqchip_handle_irq);
+ return 0;
+}
+
+IRQCHIP_DECLARE(bcm2836_arm_irqchip_l1_intc, "brcm,bcm2836-l1-intc",
+ bcm2836_arm_irqchip_l1_intc_of_init);
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
new file mode 100644
index 000000000..b14c74f7b
--- /dev/null
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Broadcom BCM6345 style Level 1 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Copyright 2015 Simon Arlott
+ *
+ * This is based on the BCM7038 (which supports SMP) but with a single
+ * enable register instead of separate mask/set/clear registers.
+ *
+ * The BCM3380 has a similar mask/status register layout, but each pair
+ * of words is at separate locations (and SMP is not supported).
+ *
+ * ENABLE/STATUS words are packed next to each other for each CPU:
+ *
+ * BCM6368:
+ * 0x1000_0020: CPU0_W0_ENABLE
+ * 0x1000_0024: CPU0_W1_ENABLE
+ * 0x1000_0028: CPU0_W0_STATUS IRQs 32-63
+ * 0x1000_002c: CPU0_W1_STATUS IRQs 0-31
+ * 0x1000_0030: CPU1_W0_ENABLE
+ * 0x1000_0034: CPU1_W1_ENABLE
+ * 0x1000_0038: CPU1_W0_STATUS IRQs 31-63
+ * 0x1000_003c: CPU1_W1_STATUS IRQs 0-31
+ *
+ * BCM63168:
+ * 0x1000_0020: CPU0_W0_ENABLE
+ * 0x1000_0024: CPU0_W1_ENABLE
+ * 0x1000_0028: CPU0_W2_ENABLE
+ * 0x1000_002c: CPU0_W3_ENABLE
+ * 0x1000_0030: CPU0_W0_STATUS IRQs 96-127
+ * 0x1000_0034: CPU0_W1_STATUS IRQs 64-95
+ * 0x1000_0038: CPU0_W2_STATUS IRQs 32-63
+ * 0x1000_003c: CPU0_W3_STATUS IRQs 0-31
+ * 0x1000_0040: CPU1_W0_ENABLE
+ * 0x1000_0044: CPU1_W1_ENABLE
+ * 0x1000_0048: CPU1_W2_ENABLE
+ * 0x1000_004c: CPU1_W3_ENABLE
+ * 0x1000_0050: CPU1_W0_STATUS IRQs 96-127
+ * 0x1000_0054: CPU1_W1_STATUS IRQs 64-95
+ * 0x1000_0058: CPU1_W2_STATUS IRQs 32-63
+ * 0x1000_005c: CPU1_W3_STATUS IRQs 0-31
+ *
+ * IRQs are numbered in CPU native endian order
+ * (which is big-endian in these examples)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define IRQS_PER_WORD 32
+#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 2)
+
+struct bcm6345_l1_cpu;
+
+struct bcm6345_l1_chip {
+ raw_spinlock_t lock;
+ unsigned int n_words;
+ struct irq_domain *domain;
+ struct cpumask cpumask;
+ struct bcm6345_l1_cpu *cpus[NR_CPUS];
+};
+
+struct bcm6345_l1_cpu {
+ struct bcm6345_l1_chip *intc;
+ void __iomem *map_base;
+ unsigned int parent_irq;
+ u32 enable_cache[];
+};
+
+static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
+ unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+ return (1 * intc->n_words - word - 1) * sizeof(u32);
+#else
+ return (0 * intc->n_words + word) * sizeof(u32);
+#endif
+}
+
+static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
+ unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+ return (2 * intc->n_words - word - 1) * sizeof(u32);
+#else
+ return (1 * intc->n_words + word) * sizeof(u32);
+#endif
+}
+
+static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
+ struct irq_data *d)
+{
+ return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
+}
+
+static void bcm6345_l1_irq_handle(struct irq_desc *desc)
+{
+ struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc);
+ struct bcm6345_l1_chip *intc = cpu->intc;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int idx;
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < intc->n_words; idx++) {
+ int base = idx * IRQS_PER_WORD;
+ unsigned long pending;
+ irq_hw_number_t hwirq;
+
+ pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
+ pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
+
+ for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+ if (generic_handle_domain_irq(intc->domain, base + hwirq))
+ spurious_interrupt();
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static inline void __bcm6345_l1_unmask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+ unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+ intc->cpus[cpu_idx]->enable_cache[word] |= mask;
+ __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+ intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static inline void __bcm6345_l1_mask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+ unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+ intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
+ __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+ intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static void bcm6345_l1_unmask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm6345_l1_unmask(d);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static void bcm6345_l1_mask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm6345_l1_mask(d);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
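+/*
+ * When moving an interrupt to a different CPU, mask it on the old CPU
+ * first (if it was enabled), update the affinity, then re-enable it on
+ * the new CPU, all under the controller lock.
+ */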
+static int bcm6345_l1_set_affinity(struct irq_data *d,
+ const struct cpumask *dest,
+ bool force)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+ unsigned int old_cpu = cpu_for_irq(intc, d);
+ unsigned int new_cpu;
+ struct cpumask valid;
+ unsigned long flags;
+ bool enabled;
+
+ if (!cpumask_and(&valid, &intc->cpumask, dest))
+ return -EINVAL;
+
+ new_cpu = cpumask_any_and(&valid, cpu_online_mask);
+ if (new_cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ dest = cpumask_of(new_cpu);
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ if (old_cpu != new_cpu) {
+ enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
+ if (enabled)
+ __bcm6345_l1_mask(d);
+ irq_data_update_affinity(d, dest);
+ if (enabled)
+ __bcm6345_l1_unmask(d);
+ } else {
+ irq_data_update_affinity(d, dest);
+ }
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+ irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static int __init bcm6345_l1_init_one(struct device_node *dn,
+ unsigned int idx,
+ struct bcm6345_l1_chip *intc)
+{
+ struct resource res;
+ resource_size_t sz;
+ struct bcm6345_l1_cpu *cpu;
+ unsigned int i, n_words;
+
+ if (of_address_to_resource(dn, idx, &res))
+ return -EINVAL;
+ sz = resource_size(&res);
+ n_words = sz / REG_BYTES_PER_IRQ_WORD;
+
+ if (!intc->n_words)
+ intc->n_words = n_words;
+ else if (intc->n_words != n_words)
+ return -EINVAL;
+
+ cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ GFP_KERNEL);
+ if (!cpu)
+ return -ENOMEM;
+
+ cpu->intc = intc;
+ cpu->map_base = ioremap(res.start, sz);
+ if (!cpu->map_base)
+ return -ENOMEM;
+
+ for (i = 0; i < n_words; i++) {
+ cpu->enable_cache[i] = 0;
+ __raw_writel(0, cpu->map_base + reg_enable(intc, i));
+ }
+
+ cpu->parent_irq = irq_of_parse_and_map(dn, idx);
+ if (!cpu->parent_irq) {
+		pr_err("failed to map parent interrupt %d\n", idx);
+ return -EINVAL;
+ }
+ irq_set_chained_handler_and_data(cpu->parent_irq,
+ bcm6345_l1_irq_handle, cpu);
+
+ return 0;
+}
+
+static struct irq_chip bcm6345_l1_irq_chip = {
+ .name = "bcm6345-l1",
+ .irq_mask = bcm6345_l1_mask,
+ .irq_unmask = bcm6345_l1_unmask,
+ .irq_set_affinity = bcm6345_l1_set_affinity,
+};
+
+static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(virq,
+ &bcm6345_l1_irq_chip, handle_percpu_irq);
+ irq_set_chip_data(virq, d->host_data);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+ return 0;
+}
+
+static const struct irq_domain_ops bcm6345_l1_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = bcm6345_l1_map,
+};
+
+static int __init bcm6345_l1_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ struct bcm6345_l1_chip *intc;
+ unsigned int idx;
+ int ret;
+
+ intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
+
+ for_each_possible_cpu(idx) {
+ ret = bcm6345_l1_init_one(dn, idx, intc);
+ if (ret)
+ pr_err("failed to init intc L1 for cpu %d: %d\n",
+ idx, ret);
+ else
+ cpumask_set_cpu(idx, &intc->cpumask);
+ }
+
+ if (cpumask_empty(&intc->cpumask)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ raw_spin_lock_init(&intc->lock);
+
+ intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+ &bcm6345_l1_domain_ops,
+ intc);
+ if (!intc->domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
+ IRQS_PER_WORD * intc->n_words);
+ for_each_cpu(idx, &intc->cpumask) {
+ struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+ pr_info(" CPU%u at MMIO 0x%p (irq = %d)\n", idx,
+ cpu->map_base, cpu->parent_irq);
+ }
+
+ return 0;
+
+out_unmap:
+ for_each_possible_cpu(idx) {
+ struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+ if (cpu) {
+ if (cpu->map_base)
+ iounmap(cpu->map_base);
+ kfree(cpu);
+ }
+ }
+out_free:
+ kfree(intc);
+ return ret;
+}
+
+IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
new file mode 100644
index 000000000..a62b96237
--- /dev/null
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Broadcom BCM7038 style Level 1 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Author: Kevin Cernekee
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/syscore_ops.h>
+
+#define IRQS_PER_WORD 32
+#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
+#define MAX_WORDS 8
+
+struct bcm7038_l1_cpu;
+
+struct bcm7038_l1_chip {
+ raw_spinlock_t lock;
+ unsigned int n_words;
+ struct irq_domain *domain;
+ struct bcm7038_l1_cpu *cpus[NR_CPUS];
+#ifdef CONFIG_PM_SLEEP
+ struct list_head list;
+ u32 wake_mask[MAX_WORDS];
+#endif
+ u32 irq_fwd_mask[MAX_WORDS];
+ u8 affinity[MAX_WORDS * IRQS_PER_WORD];
+};
+
+struct bcm7038_l1_cpu {
+ void __iomem *map_base;
+ u32 mask_cache[];
+};
+
+/*
+ * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another:
+ *
+ * 7038:
+ * 0x1000_1400: W0_STATUS
+ * 0x1000_1404: W1_STATUS
+ * 0x1000_1408: W0_MASK_STATUS
+ * 0x1000_140c: W1_MASK_STATUS
+ * 0x1000_1410: W0_MASK_SET
+ * 0x1000_1414: W1_MASK_SET
+ * 0x1000_1418: W0_MASK_CLEAR
+ * 0x1000_141c: W1_MASK_CLEAR
+ *
+ * 7445:
+ * 0xf03e_1500: W0_STATUS
+ * 0xf03e_1504: W1_STATUS
+ * 0xf03e_1508: W2_STATUS
+ * 0xf03e_150c: W3_STATUS
+ * 0xf03e_1510: W4_STATUS
+ * 0xf03e_1514: W0_MASK_STATUS
+ * 0xf03e_1518: W1_MASK_STATUS
+ * [...]
+ */
+
+static inline unsigned int reg_status(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (0 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (1 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (2 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip *intc,
+ unsigned int word)
+{
+ return (3 * intc->n_words + word) * sizeof(u32);
+}
+
+static inline u32 l1_readl(void __iomem *reg)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return ioread32be(reg);
+ else
+ return readl(reg);
+}
+
+static inline void l1_writel(u32 val, void __iomem *reg)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ iowrite32be(val, reg);
+ else
+ writel(val, reg);
+}
+
+static void bcm7038_l1_irq_handle(struct irq_desc *desc)
+{
+ struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
+ struct bcm7038_l1_cpu *cpu;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int idx;
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
+ cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+#else
+ cpu = intc->cpus[0];
+#endif
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < intc->n_words; idx++) {
+ int base = idx * IRQS_PER_WORD;
+ unsigned long pending, flags;
+ int hwirq;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ pending = l1_readl(cpu->map_base + reg_status(intc, idx)) &
+ ~cpu->mask_cache[idx];
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+ for_each_set_bit(hwirq, &pending, IRQS_PER_WORD)
+ generic_handle_domain_irq(intc->domain, base + hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
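+/*
+ * mask_cache mirrors the hardware mask state: a set bit means the
+ * interrupt is masked on that CPU.
+ */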
+static void __bcm7038_l1_unmask(struct irq_data *d, unsigned int cpu_idx)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+ intc->cpus[cpu_idx]->mask_cache[word] &= ~mask;
+ l1_writel(mask, intc->cpus[cpu_idx]->map_base +
+ reg_mask_clr(intc, word));
+}
+
+static void __bcm7038_l1_mask(struct irq_data *d, unsigned int cpu_idx)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+ intc->cpus[cpu_idx]->mask_cache[word] |= mask;
+ l1_writel(mask, intc->cpus[cpu_idx]->map_base +
+ reg_mask_set(intc, word));
+}
+
+static void bcm7038_l1_unmask(struct irq_data *d)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static void bcm7038_l1_mask(struct irq_data *d)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm7038_l1_mask(d, intc->affinity[d->hwirq]);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+#if defined(CONFIG_MIPS) && defined(CONFIG_SMP)
+static int bcm7038_l1_set_affinity(struct irq_data *d,
+ const struct cpumask *dest,
+ bool force)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ irq_hw_number_t hw = d->hwirq;
+ u32 word = hw / IRQS_PER_WORD;
+ u32 mask = BIT(hw % IRQS_PER_WORD);
+ unsigned int first_cpu = cpumask_any_and(dest, cpu_online_mask);
+ bool was_disabled;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+
+ was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &
+ mask);
+ __bcm7038_l1_mask(d, intc->affinity[hw]);
+ intc->affinity[hw] = first_cpu;
+ if (!was_disabled)
+ __bcm7038_l1_unmask(d, first_cpu);
+
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+ irq_data_update_effective_affinity(d, cpumask_of(first_cpu));
+
+ return 0;
+}
+#endif
+
+static int __init bcm7038_l1_init_one(struct device_node *dn,
+ unsigned int idx,
+ struct bcm7038_l1_chip *intc)
+{
+ struct resource res;
+ resource_size_t sz;
+ struct bcm7038_l1_cpu *cpu;
+ unsigned int i, n_words, parent_irq;
+ int ret;
+
+ if (of_address_to_resource(dn, idx, &res))
+ return -EINVAL;
+ sz = resource_size(&res);
+ n_words = sz / REG_BYTES_PER_IRQ_WORD;
+
+ if (n_words > MAX_WORDS)
+ return -EINVAL;
+ else if (!intc->n_words)
+ intc->n_words = n_words;
+ else if (intc->n_words != n_words)
+ return -EINVAL;
+
+	ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
+ intc->irq_fwd_mask, n_words);
+ if (ret != 0 && ret != -EINVAL) {
+ /* property exists but has the wrong number of words */
+ pr_err("invalid brcm,int-fwd-mask property\n");
+ return -EINVAL;
+ }
+
+ cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ GFP_KERNEL);
+ if (!cpu)
+ return -ENOMEM;
+
+ cpu->map_base = ioremap(res.start, sz);
+ if (!cpu->map_base)
+ return -ENOMEM;
+
+ for (i = 0; i < n_words; i++) {
+ l1_writel(~intc->irq_fwd_mask[i],
+ cpu->map_base + reg_mask_set(intc, i));
+ l1_writel(intc->irq_fwd_mask[i],
+ cpu->map_base + reg_mask_clr(intc, i));
+ cpu->mask_cache[i] = ~intc->irq_fwd_mask[i];
+ }
+
+ parent_irq = irq_of_parse_and_map(dn, idx);
+ if (!parent_irq) {
+		pr_err("failed to map parent interrupt %d\n", idx);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(dn, "brcm,irq-can-wake"))
+ enable_irq_wake(parent_irq);
+
+ irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
+ intc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is
+ * used because the struct irq_chip_type suspend/resume hooks are not called
+ * unless chip_type is hooked onto a generic_chip. Since this driver does
+ * not use generic_chip, we need to manually hook our resume/suspend to
+ * syscore_ops.
+ */
+static LIST_HEAD(bcm7038_l1_intcs_list);
+static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);
+
+static int bcm7038_l1_suspend(void)
+{
+ struct bcm7038_l1_chip *intc;
+ int boot_cpu, word;
+ u32 val;
+
+ /* Wakeup interrupt should only come from the boot cpu */
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
+ boot_cpu = cpu_logical_map(0);
+#else
+ boot_cpu = 0;
+#endif
+
+ list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
+ for (word = 0; word < intc->n_words; word++) {
+ val = intc->wake_mask[word] | intc->irq_fwd_mask[word];
+ l1_writel(~val,
+ intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
+ l1_writel(val,
+ intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
+ }
+ }
+
+ return 0;
+}
+
+static void bcm7038_l1_resume(void)
+{
+ struct bcm7038_l1_chip *intc;
+ int boot_cpu, word;
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
+ boot_cpu = cpu_logical_map(0);
+#else
+ boot_cpu = 0;
+#endif
+
+ list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
+ for (word = 0; word < intc->n_words; word++) {
+ l1_writel(intc->cpus[boot_cpu]->mask_cache[word],
+ intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
+ l1_writel(~intc->cpus[boot_cpu]->mask_cache[word],
+ intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
+ }
+ }
+}
+
+static struct syscore_ops bcm7038_l1_syscore_ops = {
+ .suspend = bcm7038_l1_suspend,
+ .resume = bcm7038_l1_resume,
+};
+
+static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ if (on)
+ intc->wake_mask[word] |= mask;
+ else
+ intc->wake_mask[word] &= ~mask;
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+ return 0;
+}
+#endif
+
+static struct irq_chip bcm7038_l1_irq_chip = {
+ .name = "bcm7038-l1",
+ .irq_mask = bcm7038_l1_mask,
+ .irq_unmask = bcm7038_l1_unmask,
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
+ .irq_set_affinity = bcm7038_l1_set_affinity,
+#endif
+#ifdef CONFIG_PM_SLEEP
+ .irq_set_wake = bcm7038_l1_set_wake,
+#endif
+};
+
+static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw_irq)
+{
+ struct bcm7038_l1_chip *intc = d->host_data;
+ u32 mask = BIT(hw_irq % IRQS_PER_WORD);
+ u32 word = hw_irq / IRQS_PER_WORD;
+
+ if (intc->irq_fwd_mask[word] & mask)
+ return -EPERM;
+
+ irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
+ irq_set_chip_data(virq, d->host_data);
+ irqd_set_single_target(irq_get_irq_data(virq));
+ return 0;
+}
+
+static const struct irq_domain_ops bcm7038_l1_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = bcm7038_l1_map,
+};
+
+static int __init bcm7038_l1_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ struct bcm7038_l1_chip *intc;
+ int idx, ret;
+
+ intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&intc->lock);
+ for_each_possible_cpu(idx) {
+ ret = bcm7038_l1_init_one(dn, idx, intc);
+ if (ret < 0) {
+ if (idx)
+ break;
+ pr_err("failed to remap intc L1 registers\n");
+ goto out_free;
+ }
+ }
+
+ intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+ &bcm7038_l1_domain_ops,
+ intc);
+ if (!intc->domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+#ifdef CONFIG_PM_SLEEP
+ /* Add bcm7038_l1_chip into a list */
+ raw_spin_lock(&bcm7038_l1_intcs_lock);
+ list_add_tail(&intc->list, &bcm7038_l1_intcs_list);
+ raw_spin_unlock(&bcm7038_l1_intcs_lock);
+
+ if (list_is_singular(&bcm7038_l1_intcs_list))
+ register_syscore_ops(&bcm7038_l1_syscore_ops);
+#endif
+
+ pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
+ dn, IRQS_PER_WORD * intc->n_words);
+
+ return 0;
+
+out_unmap:
+ for_each_possible_cpu(idx) {
+ struct bcm7038_l1_cpu *cpu = intc->cpus[idx];
+
+ if (cpu) {
+ if (cpu->map_base)
+ iounmap(cpu->map_base);
+ kfree(cpu);
+ }
+ }
+out_free:
+ kfree(intc);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7038_l1)
+IRQCHIP_MATCH("brcm,bcm7038-l1-intc", bcm7038_l1_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(bcm7038_l1)
+MODULE_DESCRIPTION("Broadcom STB 7038-style L1/L2 interrupt controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
new file mode 100644
index 000000000..1e9dab6e0
--- /dev/null
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Broadcom BCM7120 style Level 2 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/reboot.h>
+#include <linux/bitops.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+/* Register offset in the L2 interrupt controller */
+#define IRQEN 0x00
+#define IRQSTAT 0x04
+
+#define MAX_WORDS 4
+#define MAX_MAPPINGS (MAX_WORDS * 2)
+#define IRQS_PER_WORD 32
+
+struct bcm7120_l1_intc_data {
+ struct bcm7120_l2_intc_data *b;
+ u32 irq_map_mask[MAX_WORDS];
+};
+
+struct bcm7120_l2_intc_data {
+ unsigned int n_words;
+ void __iomem *map_base[MAX_MAPPINGS];
+ void __iomem *pair_base[MAX_WORDS];
+ int en_offset[MAX_WORDS];
+ int stat_offset[MAX_WORDS];
+ struct irq_domain *domain;
+ bool can_wake;
+ u32 irq_fwd_mask[MAX_WORDS];
+ struct bcm7120_l1_intc_data *l1_data;
+ int num_parent_irqs;
+ const __be32 *map_mask_prop;
+};
+
+static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc)
+{
+ struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc);
+ struct bcm7120_l2_intc_data *b = data->b;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int idx;
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < b->n_words; idx++) {
+ int base = idx * IRQS_PER_WORD;
+ struct irq_chip_generic *gc =
+ irq_get_domain_generic_chip(b->domain, base);
+ unsigned long pending;
+ int hwirq;
+
+ irq_gc_lock(gc);
+ pending = irq_reg_readl(gc, b->stat_offset[idx]) &
+ gc->mask_cache &
+ data->irq_map_mask[idx];
+ irq_gc_unlock(gc);
+
+ for_each_set_bit(hwirq, &pending, IRQS_PER_WORD)
+ generic_handle_domain_irq(b->domain, base + hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc)
+{
+ struct bcm7120_l2_intc_data *b = gc->private;
+ struct irq_chip_type *ct = gc->chip_types;
+
+ irq_gc_lock(gc);
+ if (b->can_wake)
+ irq_reg_writel(gc, gc->mask_cache | gc->wake_active,
+ ct->regs.mask);
+ irq_gc_unlock(gc);
+}
+
+static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
+{
+ struct irq_chip_type *ct = gc->chip_types;
+
+ /* Restore the saved mask */
+ irq_gc_lock(gc);
+ irq_reg_writel(gc, gc->mask_cache, ct->regs.mask);
+ irq_gc_unlock(gc);
+}
+
+static int bcm7120_l2_intc_init_one(struct device_node *dn,
+ struct bcm7120_l2_intc_data *data,
+ int irq, u32 *valid_mask)
+{
+ struct bcm7120_l1_intc_data *l1_data = &data->l1_data[irq];
+ int parent_irq;
+ unsigned int idx;
+
+ parent_irq = irq_of_parse_and_map(dn, irq);
+ if (!parent_irq) {
+ pr_err("failed to map interrupt %d\n", irq);
+ return -EINVAL;
+ }
+
+ /* For multiple parent IRQs with multiple words, this looks like:
+ * <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...>
+ *
+ * We need to associate a given parent interrupt with its corresponding
+ * map_mask in order to mask the status register with it because we
+ * have the same handler being called for multiple parent interrupts.
+ *
+ * This is typically something needed on BCM7xxx (STB chips).
+ */
+ for (idx = 0; idx < data->n_words; idx++) {
+ if (data->map_mask_prop) {
+ l1_data->irq_map_mask[idx] |=
+ be32_to_cpup(data->map_mask_prop +
+ irq * data->n_words + idx);
+ } else {
+ l1_data->irq_map_mask[idx] = 0xffffffff;
+ }
+ valid_mask[idx] |= l1_data->irq_map_mask[idx];
+ }
+
+ l1_data->b = data;
+
+ irq_set_chained_handler_and_data(parent_irq,
+ bcm7120_l2_intc_irq_handle, l1_data);
+ if (data->can_wake)
+ enable_irq_wake(parent_irq);
+
+ return 0;
+}
+
+static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn,
+ struct bcm7120_l2_intc_data *data)
+{
+ int ret;
+
+ data->map_base[0] = of_iomap(dn, 0);
+ if (!data->map_base[0]) {
+ pr_err("unable to map registers\n");
+ return -ENOMEM;
+ }
+
+ data->pair_base[0] = data->map_base[0];
+ data->en_offset[0] = IRQEN;
+ data->stat_offset[0] = IRQSTAT;
+ data->n_words = 1;
+
+ ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
+ data->irq_fwd_mask, data->n_words);
+ if (ret != 0 && ret != -EINVAL) {
+ /* property exists but has the wrong number of words */
+ pr_err("invalid brcm,int-fwd-mask property\n");
+ return -EINVAL;
+ }
+
+ data->map_mask_prop = of_get_property(dn, "brcm,int-map-mask", &ret);
+ if (!data->map_mask_prop ||
+ (ret != (sizeof(__be32) * data->num_parent_irqs * data->n_words))) {
+ pr_err("invalid brcm,int-map-mask property\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
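+/*
+ * On BCM3380-style controllers each word's enable and status registers
+ * live at separate addresses, so map them individually and record their
+ * offsets relative to the lower of the two mappings.
+ */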
+static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
+ struct bcm7120_l2_intc_data *data)
+{
+ unsigned int gc_idx;
+
+ for (gc_idx = 0; gc_idx < MAX_WORDS; gc_idx++) {
+ unsigned int map_idx = gc_idx * 2;
+ void __iomem *en = of_iomap(dn, map_idx + 0);
+ void __iomem *stat = of_iomap(dn, map_idx + 1);
+ void __iomem *base = min(en, stat);
+
+ data->map_base[map_idx + 0] = en;
+ data->map_base[map_idx + 1] = stat;
+
+ if (!base)
+ break;
+
+ data->pair_base[gc_idx] = base;
+ data->en_offset[gc_idx] = en - base;
+ data->stat_offset[gc_idx] = stat - base;
+ }
+
+ if (!gc_idx) {
+ pr_err("unable to map registers\n");
+ return -EINVAL;
+ }
+
+ data->n_words = gc_idx;
+ return 0;
+}
+
+static int __init bcm7120_l2_intc_probe(struct device_node *dn,
+ struct device_node *parent,
+ int (*iomap_regs_fn)(struct device_node *,
+ struct bcm7120_l2_intc_data *),
+ const char *intc_name)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct bcm7120_l2_intc_data *data;
+ struct platform_device *pdev;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ int ret = 0;
+ unsigned int idx, irq, flags;
+ u32 valid_mask[MAX_WORDS] = { };
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ pdev = of_find_device_by_node(dn);
+ if (!pdev) {
+ ret = -ENODEV;
+ goto out_free_data;
+ }
+
+ data->num_parent_irqs = platform_irq_count(pdev);
+ put_device(&pdev->dev);
+ if (data->num_parent_irqs <= 0) {
+ pr_err("invalid number of parent interrupts\n");
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ data->l1_data = kcalloc(data->num_parent_irqs, sizeof(*data->l1_data),
+ GFP_KERNEL);
+ if (!data->l1_data) {
+ ret = -ENOMEM;
+ goto out_free_l1_data;
+ }
+
+ ret = iomap_regs_fn(dn, data);
+ if (ret < 0)
+ goto out_free_l1_data;
+
+ data->can_wake = of_property_read_bool(dn, "brcm,irq-can-wake");
+
+ for (irq = 0; irq < data->num_parent_irqs; irq++) {
+ ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
+ if (ret)
+ goto out_free_l1_data;
+ }
+
+ data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words,
+ &irq_generic_chip_ops, NULL);
+ if (!data->domain) {
+ ret = -ENOMEM;
+ goto out_free_l1_data;
+ }
+
+ /* MIPS chips strapped for BE will automagically configure the
+ * peripheral registers for CPU-native byte order.
+ */
+ flags = IRQ_GC_INIT_MASK_CACHE;
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ flags |= IRQ_GC_BE_IO;
+
+ ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1,
+ dn->full_name, handle_level_irq, clr,
+ IRQ_LEVEL, flags);
+ if (ret) {
+ pr_err("failed to allocate generic irq chip\n");
+ goto out_free_domain;
+ }
+
+ for (idx = 0; idx < data->n_words; idx++) {
+ irq = idx * IRQS_PER_WORD;
+ gc = irq_get_domain_generic_chip(data->domain, irq);
+
+ gc->unused = 0xffffffff & ~valid_mask[idx];
+ gc->private = data;
+ ct = gc->chip_types;
+
+ gc->reg_base = data->pair_base[idx];
+ ct->regs.mask = data->en_offset[idx];
+
+ /* gc->reg_base is defined and so is gc->writel */
+ irq_reg_writel(gc, data->irq_fwd_mask[idx],
+ data->en_offset[idx]);
+
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_ack = irq_gc_noop;
+ gc->suspend = bcm7120_l2_intc_suspend;
+ gc->resume = bcm7120_l2_intc_resume;
+
+ /*
+ * Initialize mask-cache, in case we need it for
+ * saving/restoring fwd mask even w/o any child interrupts
+ * installed
+ */
+ gc->mask_cache = irq_reg_readl(gc, ct->regs.mask);
+
+ if (data->can_wake) {
+ /* This IRQ chip can wake the system, set all
+ * relevant child interrupts in wake_enabled mask
+ */
+ gc->wake_enabled = 0xffffffff;
+ gc->wake_enabled &= ~gc->unused;
+ ct->chip.irq_set_wake = irq_gc_set_wake;
+ }
+ }
+
+ pr_info("registered %s intc (%pOF, parent IRQ(s): %d)\n",
+ intc_name, dn, data->num_parent_irqs);
+
+ return 0;
+
+out_free_domain:
+ irq_domain_remove(data->domain);
+out_free_l1_data:
+ kfree(data->l1_data);
+out_unmap:
+ for (idx = 0; idx < MAX_MAPPINGS; idx++) {
+ if (data->map_base[idx])
+ iounmap(data->map_base[idx]);
+ }
+out_free_data:
+ kfree(data);
+ return ret;
+}
+
+static int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
+ struct device_node *parent)
+{
+ return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120,
+ "BCM7120 L2");
+}
+
+static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
+ struct device_node *parent)
+{
+ return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380,
+ "BCM3380 L2");
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7120_l2)
+IRQCHIP_MATCH("brcm,bcm7120-l2-intc", bcm7120_l2_intc_probe_7120)
+IRQCHIP_MATCH("brcm,bcm3380-l2-intc", bcm7120_l2_intc_probe_3380)
+IRQCHIP_PLATFORM_DRIVER_END(bcm7120_l2)
+MODULE_DESCRIPTION("Broadcom STB 7120-style L2 interrupt controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
new file mode 100644
index 000000000..091b0fe7e
--- /dev/null
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic Broadcom Set Top Box Level 2 Interrupt controller driver
+ *
+ * Copyright (C) 2014-2017 Broadcom
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+struct brcmstb_intc_init_params {
+ irq_flow_handler_t handler;
+ int cpu_status;
+ int cpu_clear;
+ int cpu_mask_status;
+ int cpu_mask_set;
+ int cpu_mask_clear;
+};
+
+/* Register offsets in the L2 latched interrupt controller */
+static const struct brcmstb_intc_init_params l2_edge_intc_init = {
+ .handler = handle_edge_irq,
+ .cpu_status = 0x00,
+ .cpu_clear = 0x08,
+ .cpu_mask_status = 0x0c,
+ .cpu_mask_set = 0x10,
+ .cpu_mask_clear = 0x14
+};
+
+/* Register offsets in the L2 level interrupt controller */
+static const struct brcmstb_intc_init_params l2_lvl_intc_init = {
+ .handler = handle_level_irq,
+ .cpu_status = 0x00,
+ .cpu_clear = -1, /* Register not present */
+ .cpu_mask_status = 0x04,
+ .cpu_mask_set = 0x08,
+ .cpu_mask_clear = 0x0C
+};
+
+/* L2 intc private data structure */
+struct brcmstb_l2_intc_data {
+ struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ int status_offset;
+ int mask_offset;
+ bool can_wake;
+ u32 saved_mask; /* for suspend/resume */
+};
+
+/**
+ * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt
+ * @d: irq_data
+ *
+ * Chip has separate enable/disable registers instead of a single mask
+ * register and pending interrupt is acknowledged by setting a bit.
+ *
+ * Note: This function is generic and could easily be added to the
+ * generic irqchip implementation if there ever becomes a will to do so.
+ * Perhaps with a name like irq_gc_mask_disable_and_ack_set().
+ *
+ * e.g.: https://patchwork.kernel.org/patch/9831047/
+ */
+static void brcmstb_l2_mask_and_ack(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ irq_reg_writel(gc, mask, ct->regs.disable);
+ *ct->mask_cache &= ~mask;
+ irq_reg_writel(gc, mask, ct->regs.ack);
+ irq_gc_unlock(gc);
+}
+
+static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
+{
+ struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq;
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+
+ status = irq_reg_readl(b->gc, b->status_offset) &
+ ~(irq_reg_readl(b->gc, b->mask_offset));
+
+ if (status == 0) {
+ raw_spin_lock(&desc->lock);
+ handle_bad_irq(desc);
+ raw_spin_unlock(&desc->lock);
+ goto out;
+ }
+
+ do {
+ irq = ffs(status) - 1;
+ status &= ~(1 << irq);
+ generic_handle_domain_irq(b->domain, irq);
+ } while (status);
+out:
+ chained_irq_exit(chip, desc);
+}
+
+static void brcmstb_l2_intc_suspend(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ struct brcmstb_l2_intc_data *b = gc->private;
+ unsigned long flags;
+
+ irq_gc_lock_irqsave(gc, flags);
+ /* Save the current mask */
+ b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
+
+ if (b->can_wake) {
+ /* Program the wakeup mask */
+ irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
+ irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
+ }
+ irq_gc_unlock_irqrestore(gc, flags);
+}
+
+static void brcmstb_l2_intc_resume(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ struct brcmstb_l2_intc_data *b = gc->private;
+ unsigned long flags;
+
+ irq_gc_lock_irqsave(gc, flags);
+ if (ct->chip.irq_ack) {
+ /* Clear unmasked non-wakeup interrupts */
+ irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
+ ct->regs.ack);
+ }
+
+ /* Restore the saved mask */
+ irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
+ irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
+ irq_gc_unlock_irqrestore(gc, flags);
+}
+
+static int __init brcmstb_l2_intc_of_init(struct device_node *np,
+ struct device_node *parent,
+ const struct brcmstb_intc_init_params
+ *init_params)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ unsigned int set = 0;
+ struct brcmstb_l2_intc_data *data;
+ struct irq_chip_type *ct;
+ int ret;
+ unsigned int flags;
+ int parent_irq;
+ void __iomem *base;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("failed to remap intc L2 registers\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ /* Disable all interrupts by default */
+ writel(0xffffffff, base + init_params->cpu_mask_set);
+
+ /* Wakeup interrupts may be retained from S5 (cold boot) */
+ data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake");
+ if (!data->can_wake && (init_params->cpu_clear >= 0))
+ writel(0xffffffff, base + init_params->cpu_clear);
+
+ parent_irq = irq_of_parse_and_map(np, 0);
+ if (!parent_irq) {
+ pr_err("failed to find parent interrupt\n");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ data->domain = irq_domain_add_linear(np, 32,
+ &irq_generic_chip_ops, NULL);
+ if (!data->domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ /* MIPS chips strapped for BE will automagically configure the
+ * peripheral registers for CPU-native byte order.
+ */
+ flags = 0;
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ flags |= IRQ_GC_BE_IO;
+
+ if (init_params->handler == handle_level_irq)
+ set |= IRQ_LEVEL;
+
+ /* Allocate a single Generic IRQ chip for this node */
+ ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
+ np->full_name, init_params->handler, clr, set, flags);
+ if (ret) {
+ pr_err("failed to allocate generic irq chip\n");
+ goto out_free_domain;
+ }
+
+ /* Set the IRQ chaining logic */
+ irq_set_chained_handler_and_data(parent_irq,
+ brcmstb_l2_intc_irq_handle, data);
+
+ data->gc = irq_get_domain_generic_chip(data->domain, 0);
+ data->gc->reg_base = base;
+ data->gc->private = data;
+ data->status_offset = init_params->cpu_status;
+ data->mask_offset = init_params->cpu_mask_status;
+
+ ct = data->gc->chip_types;
+
+ if (init_params->cpu_clear >= 0) {
+ ct->regs.ack = init_params->cpu_clear;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack;
+ } else {
+ /* No Ack - but still slightly more efficient to define this */
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
+ }
+
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+ ct->regs.disable = init_params->cpu_mask_set;
+ ct->regs.mask = init_params->cpu_mask_status;
+
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->regs.enable = init_params->cpu_mask_clear;
+
+ ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
+ ct->chip.irq_resume = brcmstb_l2_intc_resume;
+ ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
+
+ if (data->can_wake) {
+ /* This IRQ chip can wake the system, set all child interrupts
+ * in wake_enabled mask
+ */
+ data->gc->wake_enabled = 0xffffffff;
+ ct->chip.irq_set_wake = irq_gc_set_wake;
+ enable_irq_wake(parent_irq);
+ }
+
+ pr_info("registered L2 intc (%pOF, parent irq: %d)\n", np, parent_irq);
+
+ return 0;
+
+out_free_domain:
+ irq_domain_remove(data->domain);
+out_unmap:
+ iounmap(base);
+out_free:
+ kfree(data);
+ return ret;
+}
+
+static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
+}
+
+static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(brcmstb_l2)
+IRQCHIP_MATCH("brcm,l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,hif-spi-l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,upg-aux-aon-l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,bcm7271-l2-intc", brcmstb_l2_lvl_intc_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(brcmstb_l2)
+MODULE_DESCRIPTION("Broadcom STB generic L2 interrupt controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
new file mode 100644
index 000000000..77ebe7e47
--- /dev/null
+++ b/drivers/irqchip/irq-clps711x.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * CLPS711X IRQ driver
+ *
+ * Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define CLPS711X_INTSR1 (0x0240)
+#define CLPS711X_INTMR1 (0x0280)
+#define CLPS711X_BLEOI (0x0600)
+#define CLPS711X_MCEOI (0x0640)
+#define CLPS711X_TEOI (0x0680)
+#define CLPS711X_TC1EOI (0x06c0)
+#define CLPS711X_TC2EOI (0x0700)
+#define CLPS711X_RTCEOI (0x0740)
+#define CLPS711X_UMSEOI (0x0780)
+#define CLPS711X_COEOI (0x07c0)
+#define CLPS711X_INTSR2 (0x1240)
+#define CLPS711X_INTMR2 (0x1280)
+#define CLPS711X_SRXEOF (0x1600)
+#define CLPS711X_KBDEOI (0x1700)
+#define CLPS711X_INTSR3 (0x2240)
+#define CLPS711X_INTMR3 (0x2280)
+
+static const struct {
+#define CLPS711X_FLAG_EN (1 << 0)
+#define CLPS711X_FLAG_FIQ (1 << 1)
+ unsigned int flags;
+ phys_addr_t eoi;
+} clps711x_irqs[] = {
+ [1] = { CLPS711X_FLAG_FIQ, CLPS711X_BLEOI, },
+ [3] = { CLPS711X_FLAG_FIQ, CLPS711X_MCEOI, },
+ [4] = { CLPS711X_FLAG_EN, CLPS711X_COEOI, },
+ [5] = { CLPS711X_FLAG_EN, },
+ [6] = { CLPS711X_FLAG_EN, },
+ [7] = { CLPS711X_FLAG_EN, },
+ [8] = { CLPS711X_FLAG_EN, CLPS711X_TC1EOI, },
+ [9] = { CLPS711X_FLAG_EN, CLPS711X_TC2EOI, },
+ [10] = { CLPS711X_FLAG_EN, CLPS711X_RTCEOI, },
+ [11] = { CLPS711X_FLAG_EN, CLPS711X_TEOI, },
+ [12] = { CLPS711X_FLAG_EN, },
+ [13] = { CLPS711X_FLAG_EN, },
+ [14] = { CLPS711X_FLAG_EN, CLPS711X_UMSEOI, },
+ [15] = { CLPS711X_FLAG_EN, CLPS711X_SRXEOF, },
+ [16] = { CLPS711X_FLAG_EN, CLPS711X_KBDEOI, },
+ [17] = { CLPS711X_FLAG_EN, },
+ [18] = { CLPS711X_FLAG_EN, },
+ [28] = { CLPS711X_FLAG_EN, },
+ [29] = { CLPS711X_FLAG_EN, },
+ [32] = { CLPS711X_FLAG_FIQ, },
+};
+
+static struct {
+ void __iomem *base;
+ void __iomem *intmr[3];
+ void __iomem *intsr[3];
+ struct irq_domain *domain;
+ struct irq_domain_ops ops;
+} *clps711x_intc;
+
+static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
+{
+ u32 irqstat;
+
+ do {
+ irqstat = readw_relaxed(clps711x_intc->intmr[0]) &
+ readw_relaxed(clps711x_intc->intsr[0]);
+ if (irqstat)
+ generic_handle_domain_irq(clps711x_intc->domain,
+ fls(irqstat) - 1);
+
+ irqstat = readw_relaxed(clps711x_intc->intmr[1]) &
+ readw_relaxed(clps711x_intc->intsr[1]);
+ if (irqstat)
+ generic_handle_domain_irq(clps711x_intc->domain,
+ fls(irqstat) - 1 + 16);
+ } while (irqstat);
+}
+
+static void clps711x_intc_eoi(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hwirq].eoi);
+}
+
+static void clps711x_intc_mask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ void __iomem *intmr = clps711x_intc->intmr[hwirq / 16];
+ u32 tmp;
+
+ tmp = readl_relaxed(intmr);
+ tmp &= ~(1 << (hwirq % 16));
+ writel_relaxed(tmp, intmr);
+}
+
+static void clps711x_intc_unmask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ void __iomem *intmr = clps711x_intc->intmr[hwirq / 16];
+ u32 tmp;
+
+ tmp = readl_relaxed(intmr);
+ tmp |= 1 << (hwirq % 16);
+ writel_relaxed(tmp, intmr);
+}
+
+static struct irq_chip clps711x_intc_chip = {
+ .name = "clps711x-intc",
+ .irq_eoi = clps711x_intc_eoi,
+ .irq_mask = clps711x_intc_mask,
+ .irq_unmask = clps711x_intc_unmask,
+};
+
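+/*
+ * FIQ sources are not handled as normal IRQs: they are mapped with
+ * handle_bad_irq and left disabled (IRQ_NOAUTOEN). Sources that have an
+ * EOI register use the fasteoi flow instead of the plain level handler.
+ */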
+static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_flow_handler_t handler = handle_level_irq;
+ unsigned int flags = 0;
+
+ if (!clps711x_irqs[hw].flags)
+ return 0;
+
+ if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) {
+ handler = handle_bad_irq;
+ flags |= IRQ_NOAUTOEN;
+ } else if (clps711x_irqs[hw].eoi) {
+ handler = handle_fasteoi_irq;
+ }
+
+ /* Clear down pending interrupt */
+ if (clps711x_irqs[hw].eoi)
+ writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi);
+
+ irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler);
+ irq_modify_status(virq, IRQ_NOPROBE, flags);
+
+ return 0;
+}
+
+static int __init _clps711x_intc_init(struct device_node *np,
+ phys_addr_t base, resource_size_t size)
+{
+ int err;
+
+ clps711x_intc = kzalloc(sizeof(*clps711x_intc), GFP_KERNEL);
+ if (!clps711x_intc)
+ return -ENOMEM;
+
+ clps711x_intc->base = ioremap(base, size);
+ if (!clps711x_intc->base) {
+ err = -ENOMEM;
+ goto out_kfree;
+ }
+
+ clps711x_intc->intsr[0] = clps711x_intc->base + CLPS711X_INTSR1;
+ clps711x_intc->intmr[0] = clps711x_intc->base + CLPS711X_INTMR1;
+ clps711x_intc->intsr[1] = clps711x_intc->base + CLPS711X_INTSR2;
+ clps711x_intc->intmr[1] = clps711x_intc->base + CLPS711X_INTMR2;
+ clps711x_intc->intsr[2] = clps711x_intc->base + CLPS711X_INTSR3;
+ clps711x_intc->intmr[2] = clps711x_intc->base + CLPS711X_INTMR3;
+
+ /* Mask all interrupts */
+ writel_relaxed(0, clps711x_intc->intmr[0]);
+ writel_relaxed(0, clps711x_intc->intmr[1]);
+ writel_relaxed(0, clps711x_intc->intmr[2]);
+
+ err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id());
+ if (err < 0)
+ goto out_iounmap;
+
+ clps711x_intc->ops.map = clps711x_intc_irq_map;
+ clps711x_intc->ops.xlate = irq_domain_xlate_onecell;
+ clps711x_intc->domain =
+ irq_domain_add_legacy(np, ARRAY_SIZE(clps711x_irqs),
+ 0, 0, &clps711x_intc->ops, NULL);
+ if (!clps711x_intc->domain) {
+ err = -ENOMEM;
+ goto out_irqfree;
+ }
+
+ irq_set_default_host(clps711x_intc->domain);
+ set_handle_irq(clps711x_irqh);
+
+#ifdef CONFIG_FIQ
+ init_FIQ(0);
+#endif
+
+ return 0;
+
+out_irqfree:
+ irq_free_descs(0, ARRAY_SIZE(clps711x_irqs));
+
+out_iounmap:
+ iounmap(clps711x_intc->base);
+
+out_kfree:
+ kfree(clps711x_intc);
+
+ return err;
+}
+
+void __init clps711x_intc_init(phys_addr_t base, resource_size_t size)
+{
+ BUG_ON(_clps711x_intc_init(NULL, base, size));
+}
+
+#ifdef CONFIG_IRQCHIP
+static int __init clps711x_intc_init_dt(struct device_node *np,
+ struct device_node *parent)
+{
+ struct resource res;
+ int err;
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ return err;
+
+ return _clps711x_intc_init(np, res.start, resource_size(&res));
+}
+IRQCHIP_DECLARE(clps711x, "cirrus,ep7209-intc", clps711x_intc_init_dt);
+#endif
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
new file mode 100644
index 000000000..a05a7501e
--- /dev/null
+++ b/drivers/irqchip/irq-crossbar.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/irqchip/irq-crossbar.c
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sricharan R <r.sricharan@ti.com>
+ */
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#define IRQ_FREE -1
+#define IRQ_RESERVED -2
+#define IRQ_SKIP -3
+#define GIC_IRQ_START 32
+
+/**
+ * struct crossbar_device - crossbar device description
+ * @lock: spinlock serializing access to @irq_map
+ * @int_max: maximum number of supported interrupts
+ * @safe_map: safe default value to initialize the crossbar
+ * @max_crossbar_sources: Maximum number of crossbar sources
+ * @irq_map: array of interrupts to crossbar number mapping
+ * @crossbar_base: crossbar base address
+ * @register_offsets: offsets for each irq number
+ * @write: register write function pointer
+ */
+struct crossbar_device {
+ raw_spinlock_t lock;
+ uint int_max;
+ uint safe_map;
+ uint max_crossbar_sources;
+ uint *irq_map;
+ void __iomem *crossbar_base;
+ int *register_offsets;
+ void (*write)(int, int);
+};
+
+static struct crossbar_device *cb;
+
+static void crossbar_writel(int irq_no, int cb_no)
+{
+ writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
+}
+
+static void crossbar_writew(int irq_no, int cb_no)
+{
+ writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
+}
+
+static void crossbar_writeb(int irq_no, int cb_no)
+{
+ writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
+}
+
+static struct irq_chip crossbar_chip = {
+ .name = "CBAR",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_fwspec fwspec;
+ int i;
+ int err;
+
+ if (!irq_domain_get_of_node(domain->parent))
+ return -EINVAL;
+
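+ /* Find the highest-numbered free crossbar slot and claim it for this hwirq */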
+ raw_spin_lock(&cb->lock);
+ for (i = cb->int_max - 1; i >= 0; i--) {
+ if (cb->irq_map[i] == IRQ_FREE) {
+ cb->irq_map[i] = hwirq;
+ break;
+ }
+ }
+ raw_spin_unlock(&cb->lock);
+
+ if (i < 0)
+ return -ENODEV;
+
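+ /* The chosen crossbar slot index becomes the parent GIC SPI number */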
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0; /* SPI */
+ fwspec.param[1] = i;
+ fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ cb->irq_map[i] = IRQ_FREE;
+ else
+ cb->write(i, hwirq);
+
+ return err;
+}
+
+static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ irq_hw_number_t hwirq;
+ int i;
+
+ if (fwspec->param_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (fwspec->param[0] != 0)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ hwirq = fwspec->param[1];
+ if ((hwirq + nr_irqs) > cb->max_crossbar_sources)
+ return -EINVAL; /* Can't deal with this */
+
+ for (i = 0; i < nr_irqs; i++) {
+ int err = allocate_gic_irq(d, virq + i, hwirq + i);
+
+ if (err)
+ return err;
+
+ irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
+ &crossbar_chip, NULL);
+ }
+
+ return 0;
+}
+
+/**
+ * crossbar_domain_free - unmap/free a crossbar<->irq connection
+ * @domain: domain of irq to unmap
+ * @virq: virq number
+ * @nr_irqs: number of irqs to free
+ *
+ * We do not maintain a use count of the total number of map/unmap
+ * calls for a particular irq to find out if an irq can really be
+ * unmapped. This is because unmap is called during irq_dispose_mapping(irq),
+ * after which the irq is unusable anyway, so an explicit map has to be
+ * called after that.
+ */
+static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ raw_spin_lock(&cb->lock);
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ irq_domain_reset_irq_data(d);
+ cb->irq_map[d->hwirq] = IRQ_FREE;
+ cb->write(d->hwirq, cb->safe_map);
+ }
+ raw_spin_unlock(&cb->lock);
+}
+
+static int crossbar_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (fwspec->param[0] != 0)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct irq_domain_ops crossbar_domain_ops = {
+ .alloc = crossbar_domain_alloc,
+ .free = crossbar_domain_free,
+ .translate = crossbar_domain_translate,
+};
+
+static int __init crossbar_of_init(struct device_node *node)
+{
+ u32 max = 0, entry, reg_size;
+ int i, size, reserved = 0;
+ const __be32 *irqsr;
+ int ret = -ENOMEM;
+
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+
+ if (!cb)
+ return ret;
+
+ cb->crossbar_base = of_iomap(node, 0);
+ if (!cb->crossbar_base)
+ goto err_cb;
+
+ of_property_read_u32(node, "ti,max-crossbar-sources",
+ &cb->max_crossbar_sources);
+ if (!cb->max_crossbar_sources) {
+ pr_err("missing 'ti,max-crossbar-sources' property\n");
+ ret = -EINVAL;
+ goto err_base;
+ }
+
+ of_property_read_u32(node, "ti,max-irqs", &max);
+ if (!max) {
+ pr_err("missing 'ti,max-irqs' property\n");
+ ret = -EINVAL;
+ goto err_base;
+ }
+ cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
+ if (!cb->irq_map)
+ goto err_base;
+
+ cb->int_max = max;
+
+ for (i = 0; i < max; i++)
+ cb->irq_map[i] = IRQ_FREE;
+
+ /* Get and mark reserved irqs */
+ irqsr = of_get_property(node, "ti,irqs-reserved", &size);
+ if (irqsr) {
+ size /= sizeof(__be32);
+
+ for (i = 0; i < size; i++) {
+ of_property_read_u32_index(node,
+ "ti,irqs-reserved",
+ i, &entry);
+ if (entry >= max) {
+ pr_err("Invalid reserved entry\n");
+ ret = -EINVAL;
+ goto err_irq_map;
+ }
+ cb->irq_map[entry] = IRQ_RESERVED;
+ }
+ }
+
+ /* Skip irqs hardwired to bypass the crossbar */
+ irqsr = of_get_property(node, "ti,irqs-skip", &size);
+ if (irqsr) {
+ size /= sizeof(__be32);
+
+ for (i = 0; i < size; i++) {
+ of_property_read_u32_index(node,
+ "ti,irqs-skip",
+ i, &entry);
+ if (entry >= max) {
+ pr_err("Invalid skip entry\n");
+ ret = -EINVAL;
+ goto err_irq_map;
+ }
+ cb->irq_map[entry] = IRQ_SKIP;
+ }
+ }
+
+ cb->register_offsets = kcalloc(max, sizeof(int), GFP_KERNEL);
+ if (!cb->register_offsets)
+ goto err_irq_map;
+
+ of_property_read_u32(node, "ti,reg-size", &reg_size);
+
+ switch (reg_size) {
+ case 1:
+ cb->write = crossbar_writeb;
+ break;
+ case 2:
+ cb->write = crossbar_writew;
+ break;
+ case 4:
+ cb->write = crossbar_writel;
+ break;
+ default:
+ pr_err("Invalid reg-size property\n");
+ ret = -EINVAL;
+ goto err_reg_offset;
+ break;
+ }
+
+ /*
+ * Register offsets are not linear because of the reserved irqs,
+ * so find and store the offsets once.
+ */
+ for (i = 0; i < max; i++) {
+ if (cb->irq_map[i] == IRQ_RESERVED)
+ continue;
+
+ cb->register_offsets[i] = reserved;
+ reserved += reg_size;
+ }
+
+ of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
+ /* Initialize the crossbar with safe map to start with */
+ for (i = 0; i < max; i++) {
+ if (cb->irq_map[i] == IRQ_RESERVED ||
+ cb->irq_map[i] == IRQ_SKIP)
+ continue;
+
+ cb->write(i, cb->safe_map);
+ }
+
+ raw_spin_lock_init(&cb->lock);
+
+ return 0;
+
+err_reg_offset:
+ kfree(cb->register_offsets);
+err_irq_map:
+ kfree(cb->irq_map);
+err_base:
+ iounmap(cb->crossbar_base);
+err_cb:
+ kfree(cb);
+
+ cb = NULL;
+ return ret;
+}
+
+static int __init irqcrossbar_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ int err;
+
+ if (!parent) {
+ pr_err("%pOF: no parent, giving up\n", node);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ err = crossbar_of_init(node);
+ if (err)
+ return err;
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0,
+ cb->max_crossbar_sources,
+ node, &crossbar_domain_ops,
+ NULL);
+ if (!domain) {
+ pr_err("%pOF: failed to allocated domain\n", node);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(ti_irqcrossbar, "ti,irq-crossbar", irqcrossbar_init);
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
new file mode 100644
index 000000000..42d8a2438
--- /dev/null
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+
+#define INTC_IRQS 64
+
+#define CK_INTC_ICR 0x00
+#define CK_INTC_PEN31_00 0x14
+#define CK_INTC_PEN63_32 0x2c
+#define CK_INTC_NEN31_00 0x10
+#define CK_INTC_NEN63_32 0x28
+#define CK_INTC_SOURCE 0x40
+#define CK_INTC_DUAL_BASE 0x100
+
+#define GX_INTC_PEN31_00 0x00
+#define GX_INTC_PEN63_32 0x04
+#define GX_INTC_NEN31_00 0x40
+#define GX_INTC_NEN63_32 0x44
+#define GX_INTC_NMASK31_00 0x50
+#define GX_INTC_NMASK63_32 0x54
+#define GX_INTC_SOURCE 0x60
+
+static void __iomem *reg_base;
+static struct irq_domain *root_domain;
+
+static int nr_irq = INTC_IRQS;
+
+/*
+ * When the controller supports pulse signals, the PEN register keeps the
+ * signal latched without a software trigger.
+ *
+ * So, to support pulse signals we also need to clear the IFR register,
+ * whose offset is NEN_offset - 8.
+ */
+static void irq_ck_mask_set_bit(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long ifr = ct->regs.mask - 8;
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ *ct->mask_cache |= mask;
+ irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
+ irq_reg_writel(gc, irq_reg_readl(gc, ifr) & ~mask, ifr);
+ irq_gc_unlock(gc);
+}
+
+static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base,
+ u32 mask_reg, u32 irq_base)
+{
+ struct irq_chip_generic *gc;
+
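+ /* The NEN (enable) register is used as the mask register: setting a bit unmasks, clearing it masks */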
+ gc = irq_get_domain_generic_chip(root_domain, irq_base);
+ gc->reg_base = reg_base;
+ gc->chip_types[0].regs.mask = mask_reg;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+ if (of_find_property(node, "csky,support-pulse-signal", NULL))
+ gc->chip_types[0].chip.irq_unmask = irq_ck_mask_set_bit;
+}
+
+static inline u32 build_channel_val(u32 idx, u32 magic)
+{
+ u32 res;
+
+ /*
+ * Set the same index for each channel
+ */
+ res = idx | (idx << 8) | (idx << 16) | (idx << 24);
+
+ /*
+ * Set the channel magic number in descending order.
+ * The magic is 0x00010203 for ck-intc
+ * The magic is 0x03020100 for gx6605s-intc
+ */
+ return res | magic;
+}
+
+static inline void setup_irq_channel(u32 magic, void __iomem *reg_addr)
+{
+ u32 i;
+
+ /* Setup 64 channel slots */
+ for (i = 0; i < INTC_IRQS; i += 4)
+ writel(build_channel_val(i, magic), reg_addr + i);
+}
+
+static int __init
+ck_intc_init_comm(struct device_node *node, struct device_node *parent)
+{
+ int ret;
+
+ if (parent) {
+ pr_err("C-SKY Intc not a root irq controller\n");
+ return -EINVAL;
+ }
+
+ reg_base = of_iomap(node, 0);
+ if (!reg_base) {
+ pr_err("C-SKY Intc unable to map: %p.\n", node);
+ return -EINVAL;
+ }
+
+ root_domain = irq_domain_add_linear(node, nr_irq,
+ &irq_generic_chip_ops, NULL);
+ if (!root_domain) {
+ pr_err("C-SKY Intc irq_domain_add failed.\n");
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(root_domain, 32, 1,
+ "csky_intc", handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0, 0);
+ if (ret) {
+ pr_err("C-SKY Intc irq_alloc_gc failed.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
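+/*
+ * Handle the highest pending irq set in the @hwirq status word (offset by
+ * @irq_base); return true if an interrupt was handled.
+ */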
+static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
+ u32 irq_base)
+{
+ if (hwirq == 0)
+ return false;
+
+ generic_handle_domain_irq(root_domain, irq_base + __fls(hwirq));
+
+ return true;
+}
+
+/* gx6605s 64 irqs interrupt controller */
+static void gx_irq_handler(struct pt_regs *regs)
+{
+ bool ret;
+
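+ /* Keep draining pending irqs, handling the upper word (32-63) first */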
+retry:
+ ret = handle_irq_perbit(regs,
+ readl(reg_base + GX_INTC_PEN63_32), 32);
+ if (ret)
+ goto retry;
+
+ ret = handle_irq_perbit(regs,
+ readl(reg_base + GX_INTC_PEN31_00), 0);
+ if (ret)
+ goto retry;
+}
+
+static int __init
+gx_intc_init(struct device_node *node, struct device_node *parent)
+{
+ int ret;
+
+ ret = ck_intc_init_comm(node, parent);
+ if (ret)
+ return ret;
+
+ /*
+ * Initialize the enable registers to disable all interrupts
+ */
+ writel(0x0, reg_base + GX_INTC_NEN31_00);
+ writel(0x0, reg_base + GX_INTC_NEN63_32);
+
+ /*
+ * Initialize the mask registers with everything unmasked, because we
+ * only use the enable registers
+ */
+ writel(0x0, reg_base + GX_INTC_NMASK31_00);
+ writel(0x0, reg_base + GX_INTC_NMASK63_32);
+
+ setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE);
+
+ ck_set_gc(node, reg_base, GX_INTC_NEN31_00, 0);
+ ck_set_gc(node, reg_base, GX_INTC_NEN63_32, 32);
+
+ set_handle_irq(gx_irq_handler);
+
+ return 0;
+}
+IRQCHIP_DECLARE(csky_gx6605s_intc, "csky,gx6605s-intc", gx_intc_init);
+
+/*
+ * C-SKY simple 64-irq interrupt controller; two instances chained together
+ * can support 128 irqs.
+ */
+static void ck_irq_handler(struct pt_regs *regs)
+{
+ bool ret;
+ void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00;
+ void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32;
+
+retry:
+ /* handle 0 - 63 irqs */
+ ret = handle_irq_perbit(regs, readl(reg_pen_hi), 32);
+ if (ret)
+ goto retry;
+
+ ret = handle_irq_perbit(regs, readl(reg_pen_lo), 0);
+ if (ret)
+ goto retry;
+
+ if (nr_irq == INTC_IRQS)
+ return;
+
+ /* handle 64 - 127 irqs */
+ ret = handle_irq_perbit(regs,
+ readl(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
+ if (ret)
+ goto retry;
+
+ ret = handle_irq_perbit(regs,
+ readl(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
+ if (ret)
+ goto retry;
+}
+
+static int __init
+ck_intc_init(struct device_node *node, struct device_node *parent)
+{
+ int ret;
+
+ ret = ck_intc_init_comm(node, parent);
+ if (ret)
+ return ret;
+
+ /* Initialize the enable registers to disable all interrupts */
+ writel(0, reg_base + CK_INTC_NEN31_00);
+ writel(0, reg_base + CK_INTC_NEN63_32);
+
+ /* Enable irq intc */
+ writel(BIT(31), reg_base + CK_INTC_ICR);
+
+ ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0);
+ ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32);
+
+ setup_irq_channel(0x00010203, reg_base + CK_INTC_SOURCE);
+
+ set_handle_irq(ck_irq_handler);
+
+ return 0;
+}
+IRQCHIP_DECLARE(ck_intc, "csky,apb-intc", ck_intc_init);
+
+static int __init
+ck_dual_intc_init(struct device_node *node, struct device_node *parent)
+{
+ int ret;
+
+ /* dual-apb-intc supports up to 128 irq sources */
+ nr_irq = INTC_IRQS * 2;
+
+ ret = ck_intc_init(node, parent);
+ if (ret)
+ return ret;
+
+ /* Initialize the enable registers to disable all interrupts */
+ writel(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
+ writel(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
+
+ ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64);
+ ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96);
+
+ setup_irq_channel(0x00010203,
+ reg_base + CK_INTC_SOURCE + CK_INTC_DUAL_BASE);
+
+ return 0;
+}
+IRQCHIP_DECLARE(ck_dual_intc, "csky,dual-apb-intc", ck_dual_intc_init);
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
new file mode 100644
index 000000000..4aebd67d4
--- /dev/null
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/reg_ops.h>
+
+static struct irq_domain *root_domain;
+static void __iomem *INTCG_base;
+static void __iomem *INTCL_base;
+
+#define IPI_IRQ 15
+#define INTC_IRQS 256
+#define COMM_IRQ_BASE 32
+
+#define INTCG_SIZE 0x8000
+#define INTCL_SIZE 0x1000
+
+#define INTCG_ICTLR 0x0
+#define INTCG_CICFGR 0x100
+#define INTCG_CIDSTR 0x1000
+
+#define INTCL_PICTLR 0x0
+#define INTCL_CFGR 0x14
+#define INTCL_SIGR 0x60
+#define INTCL_RDYIR 0x6c
+#define INTCL_SENR 0xa0
+#define INTCL_CENR 0xa4
+#define INTCL_CACR 0xb4
+
+static DEFINE_PER_CPU(void __iomem *, intcl_reg);
+
+static unsigned long *__trigger;
+
+#define IRQ_OFFSET(irq) ((irq < COMM_IRQ_BASE) ? irq : (irq - COMM_IRQ_BASE))
+
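+/*
+ * Each irq's trigger type takes two bits in a configuration register:
+ * private irqs (hwirq < COMM_IRQ_BASE) are configured via the per-cpu
+ * INTCL_CFGR space, common irqs via the global INTCG_CICFGR space.
+ */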
+#define TRIG_BYTE_OFFSET(i) ((((i) * 2) / 32) * 4)
+#define TRIG_BIT_OFFSET(i) (((i) * 2) % 32)
+
+#define TRIG_VAL(trigger, irq) (trigger << TRIG_BIT_OFFSET(IRQ_OFFSET(irq)))
+#define TRIG_VAL_MSK(irq) (~(3 << TRIG_BIT_OFFSET(IRQ_OFFSET(irq))))
+
+#define TRIG_BASE(irq) \
+ (TRIG_BYTE_OFFSET(IRQ_OFFSET(irq)) + ((irq < COMM_IRQ_BASE) ? \
+ (this_cpu_read(intcl_reg) + INTCL_CFGR) : (INTCG_base + INTCG_CICFGR)))
+
+static DEFINE_SPINLOCK(setup_lock);
+static void setup_trigger(unsigned long irq, unsigned long trigger)
+{
+ unsigned int tmp;
+
+ spin_lock(&setup_lock);
+
+ /* setup trigger */
+ tmp = readl_relaxed(TRIG_BASE(irq)) & TRIG_VAL_MSK(irq);
+
+ writel_relaxed(tmp | TRIG_VAL(trigger, irq), TRIG_BASE(irq));
+
+ spin_unlock(&setup_lock);
+}
+
+static void csky_mpintc_handler(struct pt_regs *regs)
+{
+ void __iomem *reg_base = this_cpu_read(intcl_reg);
+
+ generic_handle_domain_irq(root_domain,
+ readl_relaxed(reg_base + INTCL_RDYIR));
+}
+
+static void csky_mpintc_unmask(struct irq_data *d)
+{
+ void __iomem *reg_base = this_cpu_read(intcl_reg);
+
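+ /* Apply the cached trigger type before enabling the interrupt source */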
+ setup_trigger(d->hwirq, __trigger[d->hwirq]);
+
+ writel_relaxed(d->hwirq, reg_base + INTCL_SENR);
+}
+
+static void csky_mpintc_mask(struct irq_data *d)
+{
+ void __iomem *reg_base = this_cpu_read(intcl_reg);
+
+ writel_relaxed(d->hwirq, reg_base + INTCL_CENR);
+}
+
+static void csky_mpintc_eoi(struct irq_data *d)
+{
+ void __iomem *reg_base = this_cpu_read(intcl_reg);
+
+ writel_relaxed(d->hwirq, reg_base + INTCL_CACR);
+}
+
+static int csky_mpintc_set_type(struct irq_data *d, unsigned int type)
+{
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ __trigger[d->hwirq] = 0;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ __trigger[d->hwirq] = 1;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ __trigger[d->hwirq] = 2;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ __trigger[d->hwirq] = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+static int csky_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ unsigned int cpu;
+ unsigned int offset = 4 * (d->hwirq - COMM_IRQ_BASE);
+
+ if (!force)
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask_val);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ /*
+ * The csky,mpintc supports automatic irq delivery, but it can only
+ * deliver an external irq to one cpu or to all cpus; it cannot
+ * deliver an external irq to an arbitrary group of cpus described
+ * by a cpu_mask.
+ * So we only use auto-deliver mode when the affinity mask_val is
+ * equal to cpu_present_mask.
+ */
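+ /* cpu == 0 requests auto-deliver mode; otherwise BIT(31) marks delivery to the selected cpu */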
+ if (cpumask_equal(mask_val, cpu_present_mask))
+ cpu = 0;
+ else
+ cpu |= BIT(31);
+
+ writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static struct irq_chip csky_irq_chip = {
+ .name = "C-SKY SMP Intc",
+ .irq_eoi = csky_mpintc_eoi,
+ .irq_unmask = csky_mpintc_unmask,
+ .irq_mask = csky_mpintc_mask,
+ .irq_set_type = csky_mpintc_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = csky_irq_set_affinity,
+#endif
+};
+
+static int csky_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ if (hwirq < COMM_IRQ_BASE) {
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &csky_irq_chip,
+ handle_percpu_irq);
+ } else {
+ irq_set_chip_and_handler(irq, &csky_irq_chip,
+ handle_fasteoi_irq);
+ }
+
+ return 0;
+}
+
+static int csky_irq_domain_xlate_cells(struct irq_domain *d,
+ struct device_node *ctrlr, const u32 *intspec,
+ unsigned int intsize, unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+ if (intsize > 1)
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+ else
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static const struct irq_domain_ops csky_irqdomain_ops = {
+ .map = csky_irqdomain_map,
+ .xlate = csky_irq_domain_xlate_cells,
+};
+
+#ifdef CONFIG_SMP
+static void csky_mpintc_send_ipi(const struct cpumask *mask)
+{
+ void __iomem *reg_base = this_cpu_read(intcl_reg);
+
+ /*
+ * INTCL_SIGR[3:0] INTID
+ * INTCL_SIGR[15:8] CPUMASK
+ */
+ writel_relaxed((*cpumask_bits(mask)) << 8 | IPI_IRQ,
+ reg_base + INTCL_SIGR);
+}
+#endif
+
+/* C-SKY multi processor interrupt controller */
+static int __init
+csky_mpintc_init(struct device_node *node, struct device_node *parent)
+{
+ int ret;
+ unsigned int cpu, nr_irq;
+#ifdef CONFIG_SMP
+ unsigned int ipi_irq;
+#endif
+
+ if (parent)
+ return 0;
+
+ ret = of_property_read_u32(node, "csky,num-irqs", &nr_irq);
+ if (ret < 0)
+ nr_irq = INTC_IRQS;
+
+ __trigger = kcalloc(nr_irq, sizeof(unsigned long), GFP_KERNEL);
+ if (__trigger == NULL)
+ return -ENXIO;
+
+ if (INTCG_base == NULL) {
+ INTCG_base = ioremap(mfcr("cr<31, 14>"),
+ INTCL_SIZE*nr_cpu_ids + INTCG_SIZE);
+ if (INTCG_base == NULL)
+ return -EIO;
+
+ INTCL_base = INTCG_base + INTCG_SIZE;
+
+ writel_relaxed(BIT(0), INTCG_base + INTCG_ICTLR);
+ }
+
+ root_domain = irq_domain_add_linear(node, nr_irq, &csky_irqdomain_ops,
+ NULL);
+ if (!root_domain)
+ return -ENXIO;
+
+ /* Record each present cpu's local intc base and program its PICTLR */
+ for_each_present_cpu(cpu) {
+ per_cpu(intcl_reg, cpu) = INTCL_base + (INTCL_SIZE * cpu);
+ writel_relaxed(BIT(0), per_cpu(intcl_reg, cpu) + INTCL_PICTLR);
+ }
+
+ set_handle_irq(&csky_mpintc_handler);
+
+#ifdef CONFIG_SMP
+ ipi_irq = irq_create_mapping(root_domain, IPI_IRQ);
+ if (!ipi_irq)
+ return -EIO;
+
+ set_send_ipi(&csky_mpintc_send_ipi, ipi_irq);
+#endif
+
+ return 0;
+}
+IRQCHIP_DECLARE(csky_mpintc, "csky,mpintc", csky_mpintc_init);
diff --git a/drivers/irqchip/irq-davinci-aintc.c b/drivers/irqchip/irq-davinci-aintc.c
new file mode 100644
index 000000000..123eb7bfc
--- /dev/null
+++ b/drivers/irqchip/irq-davinci-aintc.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// Copyright (C) 2006, 2019 Texas Instruments.
+//
+// Interrupt handler for DaVinci boards.
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/irq-davinci-aintc.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+
+#include <asm/exception.h>
+
+#define DAVINCI_AINTC_FIQ_REG0 0x00
+#define DAVINCI_AINTC_FIQ_REG1 0x04
+#define DAVINCI_AINTC_IRQ_REG0 0x08
+#define DAVINCI_AINTC_IRQ_REG1 0x0c
+#define DAVINCI_AINTC_IRQ_IRQENTRY 0x14
+#define DAVINCI_AINTC_IRQ_ENT_REG0 0x18
+#define DAVINCI_AINTC_IRQ_ENT_REG1 0x1c
+#define DAVINCI_AINTC_IRQ_INCTL_REG 0x20
+#define DAVINCI_AINTC_IRQ_EABASE_REG 0x24
+#define DAVINCI_AINTC_IRQ_INTPRI0_REG 0x30
+#define DAVINCI_AINTC_IRQ_INTPRI7_REG 0x4c
+
+static void __iomem *davinci_aintc_base;
+static struct irq_domain *davinci_aintc_irq_domain;
+
+static inline void davinci_aintc_writel(unsigned long value, int offset)
+{
+ writel_relaxed(value, davinci_aintc_base + offset);
+}
+
+static inline unsigned long davinci_aintc_readl(int offset)
+{
+ return readl_relaxed(davinci_aintc_base + offset);
+}
+
+static __init void
+davinci_aintc_setup_gc(void __iomem *base,
+ unsigned int irq_start, unsigned int num)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_get_domain_generic_chip(davinci_aintc_irq_domain, irq_start);
+ gc->reg_base = base;
+ gc->irq_base = irq_start;
+
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+
+ ct->regs.ack = DAVINCI_AINTC_IRQ_REG0;
+ ct->regs.mask = DAVINCI_AINTC_IRQ_ENT_REG0;
+ irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+}
+
+static asmlinkage void __exception_irq_entry
+davinci_aintc_handle_irq(struct pt_regs *regs)
+{
+ int irqnr = davinci_aintc_readl(DAVINCI_AINTC_IRQ_IRQENTRY);
+
+ /*
+ * Use the formula for entry vector index generation from section
+ * 8.3.3 of the manual.
+ */
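+ /* IRQENTRY holds (hwirq + 1) * 4, so recover hwirq from it */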
+ irqnr >>= 2;
+ irqnr -= 1;
+
+ generic_handle_domain_irq(davinci_aintc_irq_domain, irqnr);
+}
+
+/* ARM Interrupt Controller Initialization */
+void __init davinci_aintc_init(const struct davinci_aintc_config *config)
+{
+ unsigned int irq_off, reg_off, prio, shift;
+ void __iomem *req;
+ int ret, irq_base;
+ const u8 *prios;
+
+ req = request_mem_region(config->reg.start,
+ resource_size(&config->reg),
+ "davinci-cp-intc");
+ if (!req) {
+ pr_err("%s: register range busy\n", __func__);
+ return;
+ }
+
+ davinci_aintc_base = ioremap(config->reg.start,
+ resource_size(&config->reg));
+ if (!davinci_aintc_base) {
+ pr_err("%s: unable to ioremap register range\n", __func__);
+ return;
+ }
+
+ /* Clear all interrupt requests */
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG1);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG1);
+
+ /* Disable all interrupts */
+ davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_ENT_REG0);
+ davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_ENT_REG1);
+
+ /* Interrupts disabled immediately, IRQ entry reflects all */
+ davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_INCTL_REG);
+
+ /* we don't use the hardware vector table, just its entry addresses */
+ davinci_aintc_writel(0, DAVINCI_AINTC_IRQ_EABASE_REG);
+
+ /* Clear all interrupt requests */
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG1);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG1);
+
+ prios = config->prios;
+ for (reg_off = DAVINCI_AINTC_IRQ_INTPRI0_REG;
+ reg_off <= DAVINCI_AINTC_IRQ_INTPRI7_REG; reg_off += 4) {
+ for (shift = 0, prio = 0; shift < 32; shift += 4, prios++)
+ prio |= (*prios & 0x07) << shift;
+ davinci_aintc_writel(prio, reg_off);
+ }
+
+ irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
+ if (irq_base < 0) {
+ pr_err("%s: unable to allocate interrupt descriptors: %d\n",
+ __func__, irq_base);
+ return;
+ }
+
+ davinci_aintc_irq_domain = irq_domain_add_legacy(NULL,
+ config->num_irqs, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ if (!davinci_aintc_irq_domain) {
+ pr_err("%s: unable to create interrupt domain\n", __func__);
+ return;
+ }
+
+ ret = irq_alloc_domain_generic_chips(davinci_aintc_irq_domain, 32, 1,
+ "AINTC", handle_edge_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0, 0);
+ if (ret) {
+ pr_err("%s: unable to allocate generic irq chips for domain\n",
+ __func__);
+ return;
+ }
+
+ for (irq_off = 0, reg_off = 0;
+ irq_off < config->num_irqs;
+ irq_off += 32, reg_off += 0x04)
+ davinci_aintc_setup_gc(davinci_aintc_base + reg_off,
+ irq_base + irq_off, 32);
+
+ set_handle_irq(davinci_aintc_handle_irq);
+}
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
new file mode 100644
index 000000000..7482c8ed3
--- /dev/null
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Author: Steve Chen <schen@mvista.com>
+// Copyright (C) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+// Copyright (C) 2019, Texas Instruments
+//
+// TI Common Platform Interrupt Controller (cp_intc) driver
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-davinci-cp-intc.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+
+#define DAVINCI_CP_INTC_CTRL 0x04
+#define DAVINCI_CP_INTC_HOST_CTRL 0x0c
+#define DAVINCI_CP_INTC_GLOBAL_ENABLE 0x10
+#define DAVINCI_CP_INTC_SYS_STAT_IDX_CLR 0x24
+#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET 0x28
+#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR 0x2c
+#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET 0x34
+#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR 0x38
+#define DAVINCI_CP_INTC_PRIO_IDX 0x80
+#define DAVINCI_CP_INTC_SYS_STAT_CLR(n) (0x0280 + (n << 2))
+#define DAVINCI_CP_INTC_SYS_ENABLE_CLR(n) (0x0380 + (n << 2))
+#define DAVINCI_CP_INTC_CHAN_MAP(n) (0x0400 + (n << 2))
+#define DAVINCI_CP_INTC_SYS_POLARITY(n) (0x0d00 + (n << 2))
+#define DAVINCI_CP_INTC_SYS_TYPE(n) (0x0d80 + (n << 2))
+#define DAVINCI_CP_INTC_HOST_ENABLE(n) (0x1500 + (n << 2))
+#define DAVINCI_CP_INTC_PRI_INDX_MASK GENMASK(9, 0)
+#define DAVINCI_CP_INTC_GPIR_NONE BIT(31)
+
+static void __iomem *davinci_cp_intc_base;
+static struct irq_domain *davinci_cp_intc_irq_domain;
+
+static inline unsigned int davinci_cp_intc_read(unsigned int offset)
+{
+ return readl_relaxed(davinci_cp_intc_base + offset);
+}
+
+static inline void davinci_cp_intc_write(unsigned long value,
+ unsigned int offset)
+{
+ writel_relaxed(value, davinci_cp_intc_base + offset);
+}
+
+static void davinci_cp_intc_ack_irq(struct irq_data *d)
+{
+ davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_STAT_IDX_CLR);
+}
+
+static void davinci_cp_intc_mask_irq(struct irq_data *d)
+{
+ /* XXX don't know why we need to disable nIRQ here... */
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR);
+ davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR);
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
+}
+
+static void davinci_cp_intc_unmask_irq(struct irq_data *d)
+{
+ davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET);
+}
+
+static int davinci_cp_intc_set_irq_type(struct irq_data *d,
+ unsigned int flow_type)
+{
+ unsigned int reg, mask, polarity, type;
+
+ reg = BIT_WORD(d->hwirq);
+ mask = BIT_MASK(d->hwirq);
+ polarity = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_POLARITY(reg));
+ type = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_TYPE(reg));
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_RISING:
+ polarity |= mask;
+ type |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ polarity &= ~mask;
+ type |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ polarity |= mask;
+ type &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ polarity &= ~mask;
+ type &= ~mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ davinci_cp_intc_write(polarity, DAVINCI_CP_INTC_SYS_POLARITY(reg));
+ davinci_cp_intc_write(type, DAVINCI_CP_INTC_SYS_TYPE(reg));
+
+ return 0;
+}
+
+static struct irq_chip davinci_cp_intc_irq_chip = {
+ .name = "cp_intc",
+ .irq_ack = davinci_cp_intc_ack_irq,
+ .irq_mask = davinci_cp_intc_mask_irq,
+ .irq_unmask = davinci_cp_intc_unmask_irq,
+ .irq_set_type = davinci_cp_intc_set_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static asmlinkage void __exception_irq_entry
+davinci_cp_intc_handle_irq(struct pt_regs *regs)
+{
+ int gpir, irqnr, none;
+
+ /*
+ * The interrupt number is in the first ten bits. The NONE field set to 1
+ * indicates a spurious irq.
+ */
+
+ gpir = davinci_cp_intc_read(DAVINCI_CP_INTC_PRIO_IDX);
+ irqnr = gpir & DAVINCI_CP_INTC_PRI_INDX_MASK;
+ none = gpir & DAVINCI_CP_INTC_GPIR_NONE;
+
+ if (unlikely(none)) {
+ pr_err_once("%s: spurious irq!\n", __func__);
+ return;
+ }
+
+ generic_handle_domain_irq(davinci_cp_intc_irq_domain, irqnr);
+}
+
+static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw);
+
+ irq_set_chip(virq, &davinci_cp_intc_irq_chip);
+ irq_set_probe(virq);
+ irq_set_handler(virq, handle_edge_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = {
+ .map = davinci_cp_intc_host_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init
+davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
+ struct device_node *node)
+{
+ unsigned int num_regs = BITS_TO_LONGS(config->num_irqs);
+ int offset, irq_base;
+ void __iomem *req;
+
+ req = request_mem_region(config->reg.start,
+ resource_size(&config->reg),
+ "davinci-cp-intc");
+ if (!req) {
+ pr_err("%s: register range busy\n", __func__);
+ return -EBUSY;
+ }
+
+ davinci_cp_intc_base = ioremap(config->reg.start,
+ resource_size(&config->reg));
+ if (!davinci_cp_intc_base) {
+ pr_err("%s: unable to ioremap register range\n", __func__);
+ return -EINVAL;
+ }
+
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_GLOBAL_ENABLE);
+
+ /* Disable all host interrupts */
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_ENABLE(0));
+
+ /* Disable system interrupts */
+ for (offset = 0; offset < num_regs; offset++)
+ davinci_cp_intc_write(~0,
+ DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
+
+ /* Set to normal mode, no nesting, no priority hold */
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL);
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_CTRL);
+
+ /* Clear system interrupt status */
+ for (offset = 0; offset < num_regs; offset++)
+ davinci_cp_intc_write(~0,
+ DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
+
+ /* Enable nIRQ (what about nFIQ?) */
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
+
+ /* Default all priorities to channel 7. */
+ num_regs = (config->num_irqs + 3) >> 2; /* 4 channels per register */
+ for (offset = 0; offset < num_regs; offset++)
+ davinci_cp_intc_write(0x07070707,
+ DAVINCI_CP_INTC_CHAN_MAP(offset));
+
+ irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
+ if (irq_base < 0) {
+ pr_err("%s: unable to allocate interrupt descriptors: %d\n",
+ __func__, irq_base);
+ return irq_base;
+ }
+
+ davinci_cp_intc_irq_domain = irq_domain_add_legacy(
+ node, config->num_irqs, irq_base, 0,
+ &davinci_cp_intc_irq_domain_ops, NULL);
+
+ if (!davinci_cp_intc_irq_domain) {
+ pr_err("%s: unable to create an interrupt domain\n", __func__);
+ return -EINVAL;
+ }
+
+ set_handle_irq(davinci_cp_intc_handle_irq);
+
+ /* Enable global interrupt */
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_GLOBAL_ENABLE);
+
+ return 0;
+}
+
+int __init davinci_cp_intc_init(const struct davinci_cp_intc_config *config)
+{
+ return davinci_cp_intc_do_init(config, NULL);
+}
+
+static int __init davinci_cp_intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct davinci_cp_intc_config config = { };
+ int ret;
+
+ ret = of_address_to_resource(node, 0, &config.reg);
+ if (ret) {
+ pr_err("%s: unable to get the register range from device-tree\n",
+ __func__);
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "ti,intc-size", &config.num_irqs);
+ if (ret) {
+ pr_err("%s: unable to read the 'ti,intc-size' property\n",
+ __func__);
+ return ret;
+ }
+
+ return davinci_cp_intc_do_init(&config, node);
+}
+IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init);
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
new file mode 100644
index 000000000..3b0d78aac
--- /dev/null
+++ b/drivers/irqchip/irq-digicolor.c
@@ -0,0 +1,119 @@
+/*
+ * Conexant Digicolor SoCs IRQ chip driver
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2014 Paradox Innovation Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include <asm/exception.h>
+
+#define UC_IRQ_CONTROL 0x04
+
+#define IC_FLAG_CLEAR_LO 0x00
+#define IC_FLAG_CLEAR_XLO 0x04
+#define IC_INT0ENABLE_LO 0x10
+#define IC_INT0ENABLE_XLO 0x14
+#define IC_INT0STATUS_LO 0x18
+#define IC_INT0STATUS_XLO 0x1c
+
+static struct irq_domain *digicolor_irq_domain;
+
+static void __exception_irq_entry digicolor_handle_irq(struct pt_regs *regs)
+{
+ struct irq_domain_chip_generic *dgc = digicolor_irq_domain->gc;
+ struct irq_chip_generic *gc = dgc->gc[0];
+ u32 status, hwirq;
+
+ do {
+ status = irq_reg_readl(gc, IC_INT0STATUS_LO);
+ if (status) {
+ hwirq = ffs(status) - 1;
+ } else {
+ status = irq_reg_readl(gc, IC_INT0STATUS_XLO);
+ if (status)
+ hwirq = ffs(status) - 1 + 32;
+ else
+ return;
+ }
+
+ generic_handle_domain_irq(digicolor_irq_domain, hwirq);
+ } while (1);
+}
+
+static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base,
+ unsigned en_reg, unsigned ack_reg)
+{
+ struct irq_chip_generic *gc;
+
+ gc = irq_get_domain_generic_chip(digicolor_irq_domain, irq_base);
+ gc->reg_base = reg_base;
+ gc->chip_types[0].regs.ack = ack_reg;
+ gc->chip_types[0].regs.mask = en_reg;
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+}
+
+static int __init digicolor_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ void __iomem *reg_base;
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct regmap *ucregs;
+ int ret;
+
+ reg_base = of_iomap(node, 0);
+ if (!reg_base) {
+ pr_err("%pOF: unable to map IC registers\n", node);
+ return -ENXIO;
+ }
+
+ /* disable all interrupts */
+ writel(0, reg_base + IC_INT0ENABLE_LO);
+ writel(0, reg_base + IC_INT0ENABLE_XLO);
+
+ ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
+ if (IS_ERR(ucregs)) {
+ pr_err("%pOF: unable to map UC registers\n", node);
+ return PTR_ERR(ucregs);
+ }
+ /* channel 1, regular IRQs */
+ regmap_write(ucregs, UC_IRQ_CONTROL, 1);
+
+ digicolor_irq_domain =
+ irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
+ if (!digicolor_irq_domain) {
+ pr_err("%pOF: unable to create IRQ domain\n", node);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(digicolor_irq_domain, 32, 1,
+ "digicolor_irq", handle_level_irq,
+ clr, 0, 0);
+ if (ret) {
+ pr_err("%pOF: unable to allocate IRQ gc\n", node);
+ return ret;
+ }
+
+ digicolor_set_gc(reg_base, 0, IC_INT0ENABLE_LO, IC_FLAG_CLEAR_LO);
+ digicolor_set_gc(reg_base, 32, IC_INT0ENABLE_XLO, IC_FLAG_CLEAR_XLO);
+
+ set_handle_irq(digicolor_handle_irq);
+
+ return 0;
+}
+IRQCHIP_DECLARE(conexant_digicolor_ic, "cnxt,cx92755-ic", digicolor_of_init);
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
new file mode 100644
index 000000000..d5c1c750c
--- /dev/null
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -0,0 +1,218 @@
+/*
+ * Synopsys DW APB ICTL irqchip driver.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * based on GPL'ed 2.6 kernel sources
+ * (c) Marvell International Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+
+#define APB_INT_ENABLE_L 0x00
+#define APB_INT_ENABLE_H 0x04
+#define APB_INT_MASK_L 0x08
+#define APB_INT_MASK_H 0x0c
+#define APB_INT_FINALSTATUS_L 0x30
+#define APB_INT_FINALSTATUS_H 0x34
+#define APB_INT_BASE_OFFSET 0x04
+
+/* irq domain of the primary interrupt controller. */
+static struct irq_domain *dw_apb_ictl_irq_domain;
+
+static void __irq_entry dw_apb_ictl_handle_irq(struct pt_regs *regs)
+{
+ struct irq_domain *d = dw_apb_ictl_irq_domain;
+ int n;
+
+ for (n = 0; n < d->revmap_size; n += 32) {
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n);
+ u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L);
+
+ while (stat) {
+ u32 hwirq = ffs(stat) - 1;
+
+ generic_handle_domain_irq(d, hwirq);
+ stat &= ~BIT(hwirq);
+ }
+ }
+}
+
+static void dw_apb_ictl_handle_irq_cascaded(struct irq_desc *desc)
+{
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int n;
+
+ chained_irq_enter(chip, desc);
+
+ for (n = 0; n < d->revmap_size; n += 32) {
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n);
+ u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L);
+
+ while (stat) {
+ u32 hwirq = ffs(stat) - 1;
+ generic_handle_domain_irq(d, gc->irq_base + hwirq);
+
+ stat &= ~BIT(hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int dw_apb_ictl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = arg;
+
+ ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_map_generic_chip(domain, virq + i, hwirq + i);
+
+ return 0;
+}
+
+static const struct irq_domain_ops dw_apb_ictl_irq_domain_ops = {
+ .translate = irq_domain_translate_onecell,
+ .alloc = dw_apb_ictl_irq_domain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
+#ifdef CONFIG_PM
+static void dw_apb_ictl_resume(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
+ irq_gc_lock(gc);
+ writel_relaxed(~0, gc->reg_base + ct->regs.enable);
+ writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
+ irq_gc_unlock(gc);
+}
+#else
+#define dw_apb_ictl_resume NULL
+#endif /* CONFIG_PM */
+
+static int __init dw_apb_ictl_init(struct device_node *np,
+ struct device_node *parent)
+{
+ const struct irq_domain_ops *domain_ops;
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct resource r;
+ struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ void __iomem *iobase;
+ int ret, nrirqs, parent_irq, i;
+ u32 reg;
+
+ if (!parent) {
+ /* Used as the primary interrupt controller */
+ parent_irq = 0;
+ domain_ops = &dw_apb_ictl_irq_domain_ops;
+ } else {
+ /* Map the parent interrupt for the chained handler */
+ parent_irq = irq_of_parse_and_map(np, 0);
+ if (parent_irq <= 0) {
+ pr_err("%pOF: unable to parse irq\n", np);
+ return -EINVAL;
+ }
+ domain_ops = &irq_generic_chip_ops;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret) {
+ pr_err("%pOF: unable to get resource\n", np);
+ return ret;
+ }
+
+ if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
+ pr_err("%pOF: unable to request mem region\n", np);
+ return -ENOMEM;
+ }
+
+ iobase = ioremap(r.start, resource_size(&r));
+ if (!iobase) {
+ pr_err("%pOF: unable to map resource\n", np);
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ /*
+ * The DW IP can be configured to allow 2-64 irqs. We can determine the
+ * number of irqs supported by writing into the enable register and
+ * looking for bits that do not stick, as the corresponding flip-flops
+ * will have been removed by the synthesis tool.
+ */
+
+ /* mask and enable all interrupts */
+ writel_relaxed(~0, iobase + APB_INT_MASK_L);
+ writel_relaxed(~0, iobase + APB_INT_MASK_H);
+ writel_relaxed(~0, iobase + APB_INT_ENABLE_L);
+ writel_relaxed(~0, iobase + APB_INT_ENABLE_H);
+
+ reg = readl_relaxed(iobase + APB_INT_ENABLE_H);
+ if (reg)
+ nrirqs = 32 + fls(reg);
+ else
+ nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));
+
+ domain = irq_domain_add_linear(np, nrirqs, domain_ops, NULL);
+ if (!domain) {
+ pr_err("%pOF: unable to add irq domain\n", np);
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name,
+ handle_level_irq, clr, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%pOF: unable to alloc irq domain gc\n", np);
+ goto err_unmap;
+ }
+
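+ /* Bank 1 (irqs 32-63) reuses the _L register offsets shifted by 4 bytes, i.e. the _H registers */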
+ for (i = 0; i < DIV_ROUND_UP(nrirqs, 32); i++) {
+ gc = irq_get_domain_generic_chip(domain, i * 32);
+ gc->reg_base = iobase + i * APB_INT_BASE_OFFSET;
+ gc->chip_types[0].regs.mask = APB_INT_MASK_L;
+ gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
+ }
+
+ if (parent_irq) {
+ irq_set_chained_handler_and_data(parent_irq,
+ dw_apb_ictl_handle_irq_cascaded, domain);
+ } else {
+ dw_apb_ictl_irq_domain = domain;
+ set_handle_irq(dw_apb_ictl_handle_irq);
+ }
+
+ return 0;
+
+err_unmap:
+ iounmap(iobase);
+err_release:
+ release_mem_region(r.start, resource_size(&r));
+ return ret;
+}
+IRQCHIP_DECLARE(dw_apb_ictl,
+ "snps,dw-apb-ictl", dw_apb_ictl_init);
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
new file mode 100644
index 000000000..46a3aa60e
--- /dev/null
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * irqchip for the Faraday Technology FTINTC010
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-gemini/irq.c
+ * Copyright (C) 2001-2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@gmail.com>
+ */
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/cpu.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define FT010_NUM_IRQS 32
+
+#define FT010_IRQ_SOURCE(base_addr) (base_addr + 0x00)
+#define FT010_IRQ_MASK(base_addr) (base_addr + 0x04)
+#define FT010_IRQ_CLEAR(base_addr) (base_addr + 0x08)
+/* Selects level- or edge-triggered */
+#define FT010_IRQ_MODE(base_addr) (base_addr + 0x0C)
+/* Selects active low/high or falling/rising edge */
+#define FT010_IRQ_POLARITY(base_addr) (base_addr + 0x10)
+#define FT010_IRQ_STATUS(base_addr) (base_addr + 0x14)
+#define FT010_FIQ_SOURCE(base_addr) (base_addr + 0x20)
+#define FT010_FIQ_MASK(base_addr) (base_addr + 0x24)
+#define FT010_FIQ_CLEAR(base_addr) (base_addr + 0x28)
+#define FT010_FIQ_MODE(base_addr) (base_addr + 0x2C)
+#define FT010_FIQ_POLARITY(base_addr) (base_addr + 0x30)
+#define FT010_FIQ_STATUS(base_addr) (base_addr + 0x34)
+
+/**
+ * struct ft010_irq_data - irq data container for the Faraday IRQ controller
+ * @base: memory offset in virtual memory
+ * @chip: chip container for this instance
+ * @domain: IRQ domain for this instance
+ */
+struct ft010_irq_data {
+ void __iomem *base;
+ struct irq_chip chip;
+ struct irq_domain *domain;
+};
+
+static void ft010_irq_mask(struct irq_data *d)
+{
+ struct ft010_irq_data *f = irq_data_get_irq_chip_data(d);
+ unsigned int mask;
+
+ mask = readl(FT010_IRQ_MASK(f->base));
+ mask &= ~BIT(irqd_to_hwirq(d));
+ writel(mask, FT010_IRQ_MASK(f->base));
+}
+
+static void ft010_irq_unmask(struct irq_data *d)
+{
+ struct ft010_irq_data *f = irq_data_get_irq_chip_data(d);
+ unsigned int mask;
+
+ mask = readl(FT010_IRQ_MASK(f->base));
+ mask |= BIT(irqd_to_hwirq(d));
+ writel(mask, FT010_IRQ_MASK(f->base));
+}
+
+static void ft010_irq_ack(struct irq_data *d)
+{
+ struct ft010_irq_data *f = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(irqd_to_hwirq(d)), FT010_IRQ_CLEAR(f->base));
+}
+
+static int ft010_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+ struct ft010_irq_data *f = irq_data_get_irq_chip_data(d);
+ int offset = irqd_to_hwirq(d);
+ u32 mode, polarity;
+
+ mode = readl(FT010_IRQ_MODE(f->base));
+ polarity = readl(FT010_IRQ_POLARITY(f->base));
+
+ if (trigger & (IRQ_TYPE_LEVEL_LOW)) {
+ irq_set_handler_locked(d, handle_level_irq);
+ mode &= ~BIT(offset);
+ polarity |= BIT(offset);
+ } else if (trigger & (IRQ_TYPE_LEVEL_HIGH)) {
+ irq_set_handler_locked(d, handle_level_irq);
+ mode &= ~BIT(offset);
+ polarity &= ~BIT(offset);
+ } else if (trigger & IRQ_TYPE_EDGE_FALLING) {
+ irq_set_handler_locked(d, handle_edge_irq);
+ mode |= BIT(offset);
+ polarity |= BIT(offset);
+ } else if (trigger & IRQ_TYPE_EDGE_RISING) {
+ irq_set_handler_locked(d, handle_edge_irq);
+ mode |= BIT(offset);
+ polarity &= ~BIT(offset);
+ } else {
+ irq_set_handler_locked(d, handle_bad_irq);
+ pr_warn("Faraday IRQ: no supported trigger selected for line %d\n",
+ offset);
+ }
+
+ writel(mode, FT010_IRQ_MODE(f->base));
+ writel(polarity, FT010_IRQ_POLARITY(f->base));
+
+ return 0;
+}
+
+static struct irq_chip ft010_irq_chip = {
+ .name = "FTINTC010",
+ .irq_ack = ft010_irq_ack,
+ .irq_mask = ft010_irq_mask,
+ .irq_unmask = ft010_irq_unmask,
+ .irq_set_type = ft010_irq_set_type,
+};
+
+/* Local static for the IRQ entry call */
+static struct ft010_irq_data firq;
+
+asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
+{
+ struct ft010_irq_data *f = &firq;
+ int irq;
+ u32 status;
+
+ while ((status = readl(FT010_IRQ_STATUS(f->base)))) {
+ irq = ffs(status) - 1;
+ generic_handle_domain_irq(f->domain, irq);
+ }
+}
+
+static int ft010_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct ft010_irq_data *f = d->host_data;
+
+ irq_set_chip_data(irq, f);
+ /* All IRQs should set up their type, flags as bad by default */
+ irq_set_chip_and_handler(irq, &ft010_irq_chip, handle_bad_irq);
+ irq_set_probe(irq);
+
+ return 0;
+}
+
+static void ft010_irqdomain_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops ft010_irqdomain_ops = {
+ .map = ft010_irqdomain_map,
+ .unmap = ft010_irqdomain_unmap,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+int __init ft010_of_init_irq(struct device_node *node,
+ struct device_node *parent)
+{
+ struct ft010_irq_data *f = &firq;
+
+ /*
+ * Disable the idle handler by default since it is buggy.
+ * For more info see arch/arm/mach-gemini/idle.c.
+ */
+ cpu_idle_poll_ctrl(true);
+
+ f->base = of_iomap(node, 0);
+ WARN(!f->base, "unable to map gemini irq registers\n");
+
+ /* Disable all interrupts */
+ writel(0, FT010_IRQ_MASK(f->base));
+ writel(0, FT010_FIQ_MASK(f->base));
+
+ f->domain = irq_domain_add_simple(node, FT010_NUM_IRQS, 0,
+ &ft010_irqdomain_ops, f);
+ set_handle_irq(ft010_irqchip_handle_irq);
+
+ return 0;
+}
+IRQCHIP_DECLARE(faraday, "faraday,ftintc010",
+ ft010_of_init_irq);
+IRQCHIP_DECLARE(gemini, "cortina,gemini-interrupt-controller",
+ ft010_of_init_irq);
+IRQCHIP_DECLARE(moxa, "moxa,moxart-ic",
+ ft010_of_init_irq);
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
new file mode 100644
index 000000000..afd6a1841
--- /dev/null
+++ b/drivers/irqchip/irq-gic-common.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include "irq-gic-common.h"
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+void gic_enable_of_quirks(const struct device_node *np,
+ const struct gic_quirk *quirks, void *data)
+{
+ for (; quirks->desc; quirks++) {
+ if (!quirks->compatible && !quirks->property)
+ continue;
+ if (quirks->compatible &&
+ !of_device_is_compatible(np, quirks->compatible))
+ continue;
+ if (quirks->property &&
+ !of_property_read_bool(np, quirks->property))
+ continue;
+ if (quirks->init(data))
+ pr_info("GIC: enabling workaround for %s\n",
+ quirks->desc);
+ }
+}
+
+void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
+ void *data)
+{
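+ /* Only IIDR-matched quirks are handled here; DT-based quirks go through gic_enable_of_quirks() */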
+ for (; quirks->desc; quirks++) {
+ if (quirks->compatible || quirks->property)
+ continue;
+ if (quirks->iidr != (quirks->mask & iidr))
+ continue;
+ if (quirks->init(data))
+ pr_info("GIC: enabling workaround for %s\n",
+ quirks->desc);
+ }
+}
+
+int gic_configure_irq(unsigned int irq, unsigned int type,
+ void __iomem *base, void (*sync_access)(void))
+{
+ u32 confmask = 0x2 << ((irq % 16) * 2);
+ u32 confoff = (irq / 16) * 4;
+ u32 val, oldval;
+ int ret = 0;
+ unsigned long flags;
+
+ /*
+ * Read current configuration register, and insert the config
+ * for "irq", depending on "type".
+ */
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
+ val = oldval = readl_relaxed(base + confoff);
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ val &= ~confmask;
+ else if (type & IRQ_TYPE_EDGE_BOTH)
+ val |= confmask;
+
+ /* If the current configuration is the same, then we are done */
+ if (val == oldval) {
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+ return 0;
+ }
+
+ /*
+ * Write back the new configuration, and possibly re-enable
+ * the interrupt. If we fail to write a new configuration for
+ * an SPI then WARN and return an error. If we fail to write the
+ * configuration for a PPI this is most likely because the GIC
+ * does not allow us to set the configuration or we are in a
+ * non-secure mode, and hence it may not be catastrophic.
+ */
+ writel_relaxed(val, base + confoff);
+ if (readl_relaxed(base + confoff) != val)
+ ret = -EINVAL;
+
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+
+ if (sync_access)
+ sync_access();
+
+ return ret;
+}
+
+void gic_dist_config(void __iomem *base, int gic_irqs,
+ void (*sync_access)(void))
+{
+ unsigned int i;
+
+ /*
+ * Set all global interrupts to be level triggered, active low.
+ */
+ for (i = 32; i < gic_irqs; i += 16)
+ writel_relaxed(GICD_INT_ACTLOW_LVLTRIG,
+ base + GIC_DIST_CONFIG + i / 4);
+
+ /*
+ * Set priority on all global interrupts.
+ */
+ for (i = 32; i < gic_irqs; i += 4)
+ writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i);
+
+ /*
+ * Deactivate and disable all SPIs. Leave the PPI and SGIs
+ * alone as they are in the redistributor registers on GICv3.
+ */
+ for (i = 32; i < gic_irqs; i += 32) {
+ writel_relaxed(GICD_INT_EN_CLR_X32,
+ base + GIC_DIST_ACTIVE_CLEAR + i / 8);
+ writel_relaxed(GICD_INT_EN_CLR_X32,
+ base + GIC_DIST_ENABLE_CLEAR + i / 8);
+ }
+
+ if (sync_access)
+ sync_access();
+}
+
+void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void))
+{
+ int i;
+
+ /*
+ * Deal with the banked PPI and SGI interrupts - disable all
+ * private interrupts. Make sure everything is deactivated.
+ */
+ for (i = 0; i < nr; i += 32) {
+ writel_relaxed(GICD_INT_EN_CLR_X32,
+ base + GIC_DIST_ACTIVE_CLEAR + i / 8);
+ writel_relaxed(GICD_INT_EN_CLR_X32,
+ base + GIC_DIST_ENABLE_CLEAR + i / 8);
+ }
+
+ /*
+ * Set priority on PPI and SGI interrupts
+ */
+ for (i = 0; i < nr; i += 4)
+ writel_relaxed(GICD_INT_DEF_PRI_X4,
+ base + GIC_DIST_PRI + i * 4 / 4);
+
+ if (sync_access)
+ sync_access();
+}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
new file mode 100644
index 000000000..3db4592cd
--- /dev/null
+++ b/drivers/irqchip/irq-gic-common.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ */
+
+#ifndef _IRQ_GIC_COMMON_H
+#define _IRQ_GIC_COMMON_H
+
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/arm-gic-common.h>
+
+struct gic_quirk {
+ const char *desc;
+ const char *compatible;
+ const char *property;
+ bool (*init)(void *data);
+ u32 iidr;
+ u32 mask;
+};
+
+int gic_configure_irq(unsigned int irq, unsigned int type,
+ void __iomem *base, void (*sync_access)(void));
+void gic_dist_config(void __iomem *base, int gic_irqs,
+ void (*sync_access)(void));
+void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void));
+void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
+ void *data);
+void gic_enable_of_quirks(const struct device_node *np,
+ const struct gic_quirk *quirks, void *data);
+
+#endif /* _IRQ_GIC_COMMON_H */
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
new file mode 100644
index 000000000..3989d16f9
--- /dev/null
+++ b/drivers/irqchip/irq-gic-pm.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved.
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+struct gic_clk_data {
+ unsigned int num_clocks;
+ const char *const *clocks;
+};
+
+struct gic_chip_pm {
+ struct gic_chip_data *chip_data;
+ const struct gic_clk_data *clk_data;
+ struct clk_bulk_data *clks;
+};
+
+static int gic_runtime_resume(struct device *dev)
+{
+ struct gic_chip_pm *chip_pm = dev_get_drvdata(dev);
+ struct gic_chip_data *gic = chip_pm->chip_data;
+ const struct gic_clk_data *data = chip_pm->clk_data;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(data->num_clocks, chip_pm->clks);
+ if (ret)
+ return ret;
+
+ /*
+ * On the very first resume, the pointer to chip_pm->chip_data
+ * will be NULL and this is intentional, because we do not
+ * want to restore the GIC on the very first resume. So if
+ * the pointer is not valid just return.
+ */
+ if (!gic)
+ return 0;
+
+ gic_dist_restore(gic);
+ gic_cpu_restore(gic);
+
+ return 0;
+}
+
+static int gic_runtime_suspend(struct device *dev)
+{
+ struct gic_chip_pm *chip_pm = dev_get_drvdata(dev);
+ struct gic_chip_data *gic = chip_pm->chip_data;
+ const struct gic_clk_data *data = chip_pm->clk_data;
+
+ gic_dist_save(gic);
+ gic_cpu_save(gic);
+
+ clk_bulk_disable_unprepare(data->num_clocks, chip_pm->clks);
+
+ return 0;
+}
+
+static int gic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct gic_clk_data *data;
+ struct gic_chip_pm *chip_pm;
+ int ret, irq, i;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "no device match found\n");
+ return -ENODEV;
+ }
+
+ chip_pm = devm_kzalloc(dev, sizeof(*chip_pm), GFP_KERNEL);
+ if (!chip_pm)
+ return -ENOMEM;
+
+ irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!irq) {
+ dev_err(dev, "no parent interrupt found!\n");
+ return -EINVAL;
+ }
+
+ chip_pm->clks = devm_kcalloc(dev, data->num_clocks,
+ sizeof(*chip_pm->clks), GFP_KERNEL);
+ if (!chip_pm->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num_clocks; i++)
+ chip_pm->clks[i].id = data->clocks[i];
+
+ ret = devm_clk_bulk_get(dev, data->num_clocks, chip_pm->clks);
+ if (ret)
+ goto irq_dispose;
+
+ chip_pm->clk_data = data;
+ dev_set_drvdata(dev, chip_pm);
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto rpm_disable;
+
+ ret = gic_of_init_child(dev, &chip_pm->chip_data, irq);
+ if (ret)
+ goto rpm_put;
+
+ pm_runtime_put(dev);
+
+ dev_info(dev, "GIC IRQ controller registered\n");
+
+ return 0;
+
+rpm_put:
+ pm_runtime_put_sync(dev);
+rpm_disable:
+ pm_runtime_disable(dev);
+irq_dispose:
+ irq_dispose_mapping(irq);
+
+ return ret;
+}
+
+static const struct dev_pm_ops gic_pm_ops = {
+ SET_RUNTIME_PM_OPS(gic_runtime_suspend,
+ gic_runtime_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const char * const gic400_clocks[] = {
+ "clk",
+};
+
+static const struct gic_clk_data gic400_data = {
+ .num_clocks = ARRAY_SIZE(gic400_clocks),
+ .clocks = gic400_clocks,
+};
+
+static const struct of_device_id gic_match[] = {
+ { .compatible = "nvidia,tegra210-agic", .data = &gic400_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gic_match);
+
+static struct platform_driver gic_driver = {
+ .probe = gic_probe,
+ .driver = {
+ .name = "gic",
+ .of_match_table = gic_match,
+ .pm = &gic_pm_ops,
+ }
+};
+
+builtin_platform_driver(gic_driver);
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
new file mode 100644
index 000000000..38fab02ff
--- /dev/null
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Special GIC quirks for the ARM RealView
+ * Copyright (C) 2015 Linus Walleij
+ */
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/bitops.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
+
+#define REALVIEW_SYS_LOCK_OFFSET 0x20
+#define REALVIEW_SYS_PLD_CTRL1 0x74
+#define REALVIEW_EB_REVB_SYS_PLD_CTRL1 0xD8
+#define VERSATILE_LOCK_VAL 0xA05F
+#define PLD_INTMODE_MASK BIT(22)|BIT(23)|BIT(24)
+#define PLD_INTMODE_LEGACY 0x0
+#define PLD_INTMODE_NEW_DCC BIT(22)
+#define PLD_INTMODE_NEW_NO_DCC BIT(23)
+#define PLD_INTMODE_FIQ_ENABLE BIT(24)
+
+/* For some reason RealView EB Rev B moved this register */
+static const struct of_device_id syscon_pldset_of_match[] = {
+ {
+ .compatible = "arm,realview-eb11mp-revb-syscon",
+ .data = (void *)REALVIEW_EB_REVB_SYS_PLD_CTRL1,
+ },
+ {
+ .compatible = "arm,realview-eb11mp-revc-syscon",
+ .data = (void *)REALVIEW_SYS_PLD_CTRL1,
+ },
+ {
+ .compatible = "arm,realview-eb-syscon",
+ .data = (void *)REALVIEW_SYS_PLD_CTRL1,
+ },
+ {
+ .compatible = "arm,realview-pb11mp-syscon",
+ .data = (void *)REALVIEW_SYS_PLD_CTRL1,
+ },
+ {},
+};
+
+static int __init
+realview_gic_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct regmap *map;
+ struct device_node *np;
+ const struct of_device_id *gic_id;
+ u32 pld1_ctrl;
+
+ np = of_find_matching_node_and_match(NULL, syscon_pldset_of_match,
+ &gic_id);
+ if (!np)
+ return -ENODEV;
+ pld1_ctrl = (u32)gic_id->data;
+
+ /* The PB11MPCore GIC needs to be configured in the syscon */
+ map = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (!IS_ERR(map)) {
+ /* new irq mode with no DCC */
+ regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
+ VERSATILE_LOCK_VAL);
+ regmap_update_bits(map, pld1_ctrl,
+ PLD_INTMODE_NEW_NO_DCC,
+ PLD_INTMODE_MASK);
+ regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000);
+ pr_info("RealView GIC: set up interrupt controller to NEW mode, no DCC\n");
+ } else {
+ pr_err("RealView GIC setup: could not find syscon\n");
+ return -ENODEV;
+ }
+ return gic_of_init(node, parent);
+}
+IRQCHIP_DECLARE(armtc11mp_gic, "arm,tc11mp-gic", realview_gic_of_init);
+IRQCHIP_DECLARE(armeb11mp_gic, "arm,eb11mp-gic", realview_gic_of_init);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
new file mode 100644
index 000000000..6e1ac330d
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM GIC v2m MSI(-X) support
+ * Support for Message Signaled Interrupts for systems that
+ * implement ARM Generic Interrupt Controller: GICv2m.
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ * Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
+ * Brandon Anderson <brandon.anderson@amd.com>
+ */
+
+#define pr_fmt(fmt) "GICv2m: " fmt
+
+#include <linux/acpi.h>
+#include <linux/iommu.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irqchip/arm-gic.h>
+
+/*
+* MSI_TYPER:
+* [31:26] Reserved
+* [25:16] lowest SPI assigned to MSI
+* [15:10] Reserved
+* [9:0]	Number of SPIs assigned to MSI
+*/
+#define V2M_MSI_TYPER 0x008
+#define V2M_MSI_TYPER_BASE_SHIFT 16
+#define V2M_MSI_TYPER_BASE_MASK 0x3FF
+#define V2M_MSI_TYPER_NUM_MASK 0x3FF
+#define V2M_MSI_SETSPI_NS 0x040
+#define V2M_MIN_SPI 32
+#define V2M_MAX_SPI 1019
+#define V2M_MSI_IIDR 0xFCC
+
+#define V2M_MSI_TYPER_BASE_SPI(x) \
+ (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
+
+#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)
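+
+/*
+ * Worked example (values are purely illustrative): for a hypothetical
+ * MSI_TYPER value of 0x00400020, the macros above give
+ *   V2M_MSI_TYPER_BASE_SPI(0x00400020) = (0x00400020 >> 16) & 0x3FF = 64
+ *   V2M_MSI_TYPER_NUM_SPI(0x00400020)  =  0x00400020        & 0x3FF = 32
+ * so the frame would own SPIs 64..95.
+ */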
+
+/* APM X-Gene with GICv2m MSI_IIDR register value */
+#define XGENE_GICV2M_MSI_IIDR 0x06000170
+
+/* Broadcom NS2 GICv2m MSI_IIDR register value */
+#define BCM_NS2_GICV2M_MSI_IIDR 0x0000013f
+
+/* List of flags for specific v2m implementation */
+#define GICV2M_NEEDS_SPI_OFFSET 0x00000001
+#define GICV2M_GRAVITON_ADDRESS_ONLY 0x00000002
+
+static LIST_HEAD(v2m_nodes);
+static DEFINE_SPINLOCK(v2m_lock);
+
+struct v2m_data {
+ struct list_head entry;
+ struct fwnode_handle *fwnode;
+ struct resource res; /* GICv2m resource */
+ void __iomem *base; /* GICv2m virt address */
+	u32 spi_start;		/* The SPI number at which MSIs start */
+ u32 nr_spis; /* The number of SPIs for MSIs */
+ u32 spi_offset; /* offset to be subtracted from SPI number */
+ unsigned long *bm; /* MSI vector bitmap */
+ u32 flags; /* v2m flags for specific implementation */
+};
+
+static void gicv2m_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void gicv2m_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip gicv2m_msi_irq_chip = {
+ .name = "MSI",
+ .irq_mask = gicv2m_mask_msi_irq,
+ .irq_unmask = gicv2m_unmask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+};
+
+static struct msi_domain_info gicv2m_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &gicv2m_msi_irq_chip,
+};
+
+static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
+{
+ if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
+ return v2m->res.start | ((hwirq - 32) << 3);
+ else
+ return v2m->res.start + V2M_MSI_SETSPI_NS;
+}
+
+static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);
+
+ msg->address_hi = upper_32_bits(addr);
+ msg->address_lo = lower_32_bits(addr);
+
+ if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
+ msg->data = 0;
+ else
+ msg->data = data->hwirq;
+ if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
+ msg->data -= v2m->spi_offset;
+
+ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+}
+
+static struct irq_chip gicv2m_irq_chip = {
+ .name = "GICv2m",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = gicv2m_compose_msi_msg,
+};
+
+static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int err;
+
+ if (is_of_node(domain->parent->fwnode)) {
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = hwirq - 32;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+ } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ } else {
+ return -EINVAL;
+ }
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ return err;
+
+ /* Configure the interrupt line to be edge */
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+ return 0;
+}
+
+static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
+ int nr_irqs)
+{
+ spin_lock(&v2m_lock);
+ bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
+ get_count_order(nr_irqs));
+ spin_unlock(&v2m_lock);
+}
+
+static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ msi_alloc_info_t *info = args;
+ struct v2m_data *v2m = NULL, *tmp;
+ int hwirq, offset, i, err = 0;
+
+ spin_lock(&v2m_lock);
+ list_for_each_entry(tmp, &v2m_nodes, entry) {
+ offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
+ get_count_order(nr_irqs));
+ if (offset >= 0) {
+ v2m = tmp;
+ break;
+ }
+ }
+ spin_unlock(&v2m_lock);
+
+ if (!v2m)
+ return -ENOSPC;
+
+ hwirq = v2m->spi_start + offset;
+
+ err = iommu_dma_prepare_msi(info->desc,
+ gicv2m_get_msi_addr(v2m, hwirq));
+ if (err)
+ return err;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto fail;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &gicv2m_irq_chip, v2m);
+ }
+
+ return 0;
+
+fail:
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
+ return err;
+}
+
+static void gicv2m_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
+
+ gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops gicv2m_domain_ops = {
+ .alloc = gicv2m_irq_domain_alloc,
+ .free = gicv2m_irq_domain_free,
+};
+
+static bool is_msi_spi_valid(u32 base, u32 num)
+{
+ if (base < V2M_MIN_SPI) {
+ pr_err("Invalid MSI base SPI (base:%u)\n", base);
+ return false;
+ }
+
+ if ((num == 0) || (base + num > V2M_MAX_SPI)) {
+		pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
+ num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
+ return false;
+ }
+
+ return true;
+}
+
+static struct irq_chip gicv2m_pmsi_irq_chip = {
+ .name = "pMSI",
+};
+
+static struct msi_domain_ops gicv2m_pmsi_ops = {
+};
+
+static struct msi_domain_info gicv2m_pmsi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &gicv2m_pmsi_ops,
+ .chip = &gicv2m_pmsi_irq_chip,
+};
+
+static void gicv2m_teardown(void)
+{
+ struct v2m_data *v2m, *tmp;
+
+ list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
+ list_del(&v2m->entry);
+ bitmap_free(v2m->bm);
+ iounmap(v2m->base);
+ of_node_put(to_of_node(v2m->fwnode));
+ if (is_fwnode_irqchip(v2m->fwnode))
+ irq_domain_free_fwnode(v2m->fwnode);
+ kfree(v2m);
+ }
+}
+
+static int gicv2m_allocate_domains(struct irq_domain *parent)
+{
+ struct irq_domain *inner_domain, *pci_domain, *plat_domain;
+ struct v2m_data *v2m;
+
+ v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
+ if (!v2m)
+ return 0;
+
+ inner_domain = irq_domain_create_tree(v2m->fwnode,
+ &gicv2m_domain_ops, v2m);
+ if (!inner_domain) {
+ pr_err("Failed to create GICv2m domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
+ inner_domain->parent = parent;
+ pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
+ &gicv2m_msi_domain_info,
+ inner_domain);
+ plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
+ &gicv2m_pmsi_domain_info,
+ inner_domain);
+ if (!pci_domain || !plat_domain) {
+ pr_err("Failed to create MSI domains\n");
+ if (plat_domain)
+ irq_domain_remove(plat_domain);
+ if (pci_domain)
+ irq_domain_remove(pci_domain);
+ irq_domain_remove(inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
+ u32 spi_start, u32 nr_spis,
+ struct resource *res, u32 flags)
+{
+ int ret;
+ struct v2m_data *v2m;
+
+ v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
+ if (!v2m)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&v2m->entry);
+ v2m->fwnode = fwnode;
+ v2m->flags = flags;
+
+ memcpy(&v2m->res, res, sizeof(struct resource));
+
+ v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
+ if (!v2m->base) {
+ pr_err("Failed to map GICv2m resource\n");
+ ret = -ENOMEM;
+ goto err_free_v2m;
+ }
+
+ if (spi_start && nr_spis) {
+ v2m->spi_start = spi_start;
+ v2m->nr_spis = nr_spis;
+ } else {
+ u32 typer;
+
+ /* Graviton should always have explicit spi_start/nr_spis */
+ if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) {
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+ typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
+
+ v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
+ v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
+ }
+
+ if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+
+ /*
+ * APM X-Gene GICv2m implementation has an erratum where
+ * the MSI data needs to be the offset from the spi_start
+ * in order to trigger the correct MSI interrupt. This is
+ * different from the standard GICv2m implementation where
+ * the MSI data is the absolute value within the range from
+ * spi_start to (spi_start + num_spis).
+ *
+ * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
+ * is 'spi_number - 32'
+ *
+ * Reading that register fails on the Graviton implementation
+ */
+ if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) {
+ switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
+ case XGENE_GICV2M_MSI_IIDR:
+ v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+ v2m->spi_offset = v2m->spi_start;
+ break;
+ case BCM_NS2_GICV2M_MSI_IIDR:
+ v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+ v2m->spi_offset = 32;
+ break;
+ }
+ }
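+
+	/*
+	 * Worked example of the quirks above (illustrative numbers only):
+	 * with spi_start = 64 and hwirq = 80, a standard frame puts 80 in
+	 * the MSI data, an X-Gene frame (spi_offset = spi_start) puts
+	 * 80 - 64 = 16, and an NS2 frame (spi_offset = 32) puts 80 - 32 = 48.
+	 */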
+ v2m->bm = bitmap_zalloc(v2m->nr_spis, GFP_KERNEL);
+ if (!v2m->bm) {
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+
+ list_add_tail(&v2m->entry, &v2m_nodes);
+
+ pr_info("range%pR, SPI[%d:%d]\n", res,
+ v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
+ return 0;
+
+err_iounmap:
+ iounmap(v2m->base);
+err_free_v2m:
+ kfree(v2m);
+ return ret;
+}
+
+static const struct of_device_id gicv2m_device_id[] = {
+ { .compatible = "arm,gic-v2m-frame", },
+ {},
+};
+
+static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
+ struct irq_domain *parent)
+{
+ int ret = 0;
+ struct device_node *node = to_of_node(parent_handle);
+ struct device_node *child;
+
+ for (child = of_find_matching_node(node, gicv2m_device_id); child;
+ child = of_find_matching_node(child, gicv2m_device_id)) {
+ u32 spi_start = 0, nr_spis = 0;
+ struct resource res;
+
+ if (!of_find_property(child, "msi-controller", NULL))
+ continue;
+
+ ret = of_address_to_resource(child, 0, &res);
+ if (ret) {
+ pr_err("Failed to allocate v2m resource.\n");
+ break;
+ }
+
+ if (!of_property_read_u32(child, "arm,msi-base-spi",
+ &spi_start) &&
+ !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
+ pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
+ spi_start, nr_spis);
+
+ ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
+ &res, 0);
+ if (ret) {
+ of_node_put(child);
+ break;
+ }
+ }
+
+ if (!ret)
+ ret = gicv2m_allocate_domains(parent);
+ if (ret)
+ gicv2m_teardown();
+ return ret;
+}
+
+#ifdef CONFIG_ACPI
+static int acpi_num_msi;
+
+static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
+{
+ struct v2m_data *data;
+
+ if (WARN_ON(acpi_num_msi <= 0))
+ return NULL;
+
+ /* We only return the fwnode of the first MSI frame. */
+ data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
+ if (!data)
+ return NULL;
+
+ return data->fwnode;
+}
+
+static bool acpi_check_amazon_graviton_quirks(void)
+{
+ static struct acpi_table_madt *madt;
+ acpi_status status;
+ bool rc = false;
+
+#define ACPI_AMZN_OEM_ID "AMAZON"
+
+ status = acpi_get_table(ACPI_SIG_MADT, 0,
+ (struct acpi_table_header **)&madt);
+
+ if (ACPI_FAILURE(status) || !madt)
+ return rc;
+ rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE);
+ acpi_put_table((struct acpi_table_header *)madt);
+
+ return rc;
+}
+
+static int __init
+acpi_parse_madt_msi(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ int ret;
+ struct resource res;
+ u32 spi_start = 0, nr_spis = 0;
+ struct acpi_madt_generic_msi_frame *m;
+ struct fwnode_handle *fwnode;
+ u32 flags = 0;
+
+ m = (struct acpi_madt_generic_msi_frame *)header;
+ if (BAD_MADT_ENTRY(m, end))
+ return -EINVAL;
+
+ res.start = m->base_address;
+ res.end = m->base_address + SZ_4K - 1;
+ res.flags = IORESOURCE_MEM;
+
+ if (acpi_check_amazon_graviton_quirks()) {
+ pr_info("applying Amazon Graviton quirk\n");
+ res.end = res.start + SZ_8K - 1;
+ flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
+ gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
+ }
+
+ if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
+ spi_start = m->spi_base;
+ nr_spis = m->spi_count;
+
+ pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
+ spi_start, nr_spis);
+ }
+
+ fwnode = irq_domain_alloc_fwnode(&res.start);
+ if (!fwnode) {
+ pr_err("Unable to allocate GICv2m domain token\n");
+ return -EINVAL;
+ }
+
+ ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags);
+ if (ret)
+ irq_domain_free_fwnode(fwnode);
+
+ return ret;
+}
+
+static int __init gicv2m_acpi_init(struct irq_domain *parent)
+{
+ int ret;
+
+ if (acpi_num_msi > 0)
+ return 0;
+
+ acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
+ acpi_parse_madt_msi, 0);
+
+ if (acpi_num_msi <= 0)
+ goto err_out;
+
+ ret = gicv2m_allocate_domains(parent);
+ if (ret)
+ goto err_out;
+
+ pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);
+
+ return 0;
+
+err_out:
+ gicv2m_teardown();
+ return -EINVAL;
+}
+#else /* CONFIG_ACPI */
+static int __init gicv2m_acpi_init(struct irq_domain *parent)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+int __init gicv2m_init(struct fwnode_handle *parent_handle,
+ struct irq_domain *parent)
+{
+ if (is_of_node(parent_handle))
+ return gicv2m_of_init(parent_handle, parent);
+
+ return gicv2m_acpi_init(parent);
+}
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
new file mode 100644
index 000000000..634263dfd
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/fsl/mc.h>
+
+static struct irq_chip its_msi_irq_chip = {
+ .name = "ITS-fMSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = msi_domain_set_affinity
+};
+
+static u32 fsl_mc_msi_domain_get_msi_id(struct irq_domain *domain,
+ struct fsl_mc_device *mc_dev)
+{
+ struct device_node *of_node;
+ u32 out_id;
+
+ of_node = irq_domain_get_of_node(domain);
+ out_id = of_node ? of_msi_map_id(&mc_dev->dev, of_node, mc_dev->icid) :
+ iort_msi_map_id(&mc_dev->dev, mc_dev->icid);
+
+ return out_id;
+}
+
+static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
+ struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct msi_domain_info *msi_info;
+
+ if (!dev_is_fsl_mc(dev))
+ return -EINVAL;
+
+ mc_bus_dev = to_fsl_mc_device(dev);
+ if (!(mc_bus_dev->flags & FSL_MC_IS_DPRC))
+ return -EINVAL;
+
+ /*
+ * Set the device Id to be passed to the GIC-ITS:
+ *
+ * NOTE: This device id corresponds to the IOMMU stream ID
+ * associated with the DPRC object (ICID).
+ */
+ info->scratchpad[0].ul = fsl_mc_msi_domain_get_msi_id(msi_domain,
+ mc_bus_dev);
+ msi_info = msi_get_domain_info(msi_domain->parent);
+
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
+ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
+}
+
+static struct msi_domain_ops its_fsl_mc_msi_ops __ro_after_init = {
+ .msi_prepare = its_fsl_mc_msi_prepare,
+};
+
+static struct msi_domain_info its_fsl_mc_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &its_fsl_mc_msi_ops,
+ .chip = &its_msi_irq_chip,
+};
+
+static const struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+static void __init its_fsl_mc_msi_init_one(struct fwnode_handle *handle,
+ const char *name)
+{
+ struct irq_domain *parent;
+ struct irq_domain *mc_msi_domain;
+
+ parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ pr_err("%s: unable to locate ITS domain\n", name);
+ return;
+ }
+
+ mc_msi_domain = fsl_mc_msi_create_irq_domain(handle,
+ &its_fsl_mc_msi_domain_info,
+ parent);
+ if (!mc_msi_domain) {
+ pr_err("%s: unable to create fsl-mc domain\n", name);
+ return;
+ }
+
+ pr_info("fsl-mc MSI: %s domain created\n", name);
+}
+
+#ifdef CONFIG_ACPI
+static int __init
+its_fsl_mc_msi_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_translator *its_entry;
+ struct fwnode_handle *dom_handle;
+ const char *node_name;
+ int err = 0;
+
+ its_entry = (struct acpi_madt_generic_translator *)header;
+ node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
+ (long)its_entry->base_address);
+
+ dom_handle = iort_find_domain_token(its_entry->translation_id);
+ if (!dom_handle) {
+ pr_err("%s: Unable to locate ITS domain handle\n", node_name);
+ err = -ENXIO;
+ goto out;
+ }
+
+ its_fsl_mc_msi_init_one(dom_handle, node_name);
+
+out:
+ kfree(node_name);
+ return err;
+}
+
+
+static void __init its_fsl_mc_acpi_msi_init(void)
+{
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ its_fsl_mc_msi_parse_madt, 0);
+}
+#else
+static inline void its_fsl_mc_acpi_msi_init(void) { }
+#endif
+
+static void __init its_fsl_mc_of_msi_init(void)
+{
+ struct device_node *np;
+
+ for (np = of_find_matching_node(NULL, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ if (!of_device_is_available(np))
+ continue;
+ if (!of_property_read_bool(np, "msi-controller"))
+ continue;
+
+ its_fsl_mc_msi_init_one(of_node_to_fwnode(np),
+ np->full_name);
+ }
+}
+
+static int __init its_fsl_mc_msi_init(void)
+{
+ its_fsl_mc_of_msi_init();
+ its_fsl_mc_acpi_msi_init();
+
+ return 0;
+}
+
+early_initcall(its_fsl_mc_msi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
new file mode 100644
index 000000000..93f77a819
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#include <linux/acpi_iort.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+static void its_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void its_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip its_msi_irq_chip = {
+ .name = "ITS-MSI",
+ .irq_unmask = its_unmask_msi_irq,
+ .irq_mask = its_mask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+};
+
+static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
+{
+ int msi, msix, *count = data;
+
+ msi = max(pci_msi_vec_count(pdev), 0);
+ msix = max(pci_msix_vec_count(pdev), 0);
+ *count += max(msi, msix);
+
+ return 0;
+}
+
+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct pci_dev **alias_dev = data;
+
+ *alias_dev = pdev;
+
+ return 0;
+}
+
+static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct pci_dev *pdev, *alias_dev;
+ struct msi_domain_info *msi_info;
+ int alias_count = 0, minnvec = 1;
+
+ if (!dev_is_pci(dev))
+ return -EINVAL;
+
+ msi_info = msi_get_domain_info(domain->parent);
+
+ pdev = to_pci_dev(dev);
+ /*
+ * If pdev is downstream of any aliasing bridges, take an upper
+ * bound of how many other vectors could map to the same DevID.
+ * Also tell the ITS that the signalling will come from a proxy
+ * device, and that special allocation rules apply.
+ */
+ pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
+ if (alias_dev != pdev) {
+ if (alias_dev->subordinate)
+ pci_walk_bus(alias_dev->subordinate,
+ its_pci_msi_vec_count, &alias_count);
+ info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
+ }
+
+ /* ITS specific DeviceID, as the core ITS ignores dev. */
+ info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
+
+ /*
+ * Always allocate a power of 2, and special case device 0 for
+ * broken systems where the DevID is not wired (and all devices
+ * appear as DevID 0). For that reason, we generously allocate a
+ * minimum of 32 MSIs for DevID 0. If you want more because all
+ * your devices are aliasing to DevID 0, consider fixing your HW.
+ */
+ nvec = max(nvec, alias_count);
+ if (!info->scratchpad[0].ul)
+ minnvec = 32;
+ nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+ return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
+}
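+
+/*
+ * Worked example of the sizing above (hypothetical numbers): a device
+ * asking for nvec = 5 with no DMA aliases and a non-zero DevID ends up
+ * with roundup_pow_of_two(5) = 8 vectors, while a device aliased to
+ * DevID 0 gets max(32, 8) = 32.
+ */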
+
+static struct msi_domain_ops its_pci_msi_ops = {
+ .msi_prepare = its_pci_msi_prepare,
+};
+
+static struct msi_domain_info its_pci_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .ops = &its_pci_msi_ops,
+ .chip = &its_msi_irq_chip,
+};
+
+static struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+static int __init its_pci_msi_init_one(struct fwnode_handle *handle,
+ const char *name)
+{
+ struct irq_domain *parent;
+
+ parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ pr_err("%s: Unable to locate ITS domain\n", name);
+ return -ENXIO;
+ }
+
+ if (!pci_msi_create_irq_domain(handle, &its_pci_msi_domain_info,
+ parent)) {
+ pr_err("%s: Unable to create PCI domain\n", name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int __init its_pci_of_msi_init(void)
+{
+ struct device_node *np;
+
+ for (np = of_find_matching_node(NULL, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ if (!of_device_is_available(np))
+ continue;
+ if (!of_property_read_bool(np, "msi-controller"))
+ continue;
+
+ if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
+ continue;
+
+ pr_info("PCI/MSI: %pOF domain created\n", np);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+
+static int __init
+its_pci_msi_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_translator *its_entry;
+ struct fwnode_handle *dom_handle;
+ const char *node_name;
+ int err = -ENXIO;
+
+ its_entry = (struct acpi_madt_generic_translator *)header;
+ node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
+ (long)its_entry->base_address);
+ dom_handle = iort_find_domain_token(its_entry->translation_id);
+ if (!dom_handle) {
+ pr_err("%s: Unable to locate ITS domain handle\n", node_name);
+ goto out;
+ }
+
+ err = its_pci_msi_init_one(dom_handle, node_name);
+ if (!err)
+ pr_info("PCI/MSI: %s domain created\n", node_name);
+
+out:
+ kfree(node_name);
+ return err;
+}
+
+static int __init its_pci_acpi_msi_init(void)
+{
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ its_pci_msi_parse_madt, 0);
+ return 0;
+}
+#else
+static int __init its_pci_acpi_msi_init(void)
+{
+ return 0;
+}
+#endif
+
+static int __init its_pci_msi_init(void)
+{
+ its_pci_of_msi_init();
+ its_pci_acpi_msi_init();
+
+ return 0;
+}
+early_initcall(its_pci_msi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
new file mode 100644
index 000000000..daa6d5053
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#include <linux/acpi_iort.h>
+#include <linux/device.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+static struct irq_chip its_pmsi_irq_chip = {
+ .name = "ITS-pMSI",
+};
+
+static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
+ u32 *dev_id)
+{
+ int ret, index = 0;
+
+ /* Suck the DeviceID out of the msi-parent property */
+ do {
+ struct of_phandle_args args;
+
+ ret = of_parse_phandle_with_args(dev->of_node,
+ "msi-parent", "#msi-cells",
+ index, &args);
+ if (args.np == irq_domain_get_of_node(domain)) {
+ if (WARN_ON(args.args_count != 1))
+ return -EINVAL;
+ *dev_id = args.args[0];
+ break;
+ }
+ index++;
+ } while (!ret);
+
+ return ret;
+}
+
+int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
+{
+ return -1;
+}
+
+static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
+ int ret;
+
+ msi_info = msi_get_domain_info(domain->parent);
+
+ if (dev->of_node)
+ ret = of_pmsi_get_dev_id(domain, dev, &dev_id);
+ else
+ ret = iort_pmsi_get_dev_id(dev, &dev_id);
+ if (ret)
+ return ret;
+
+ /* ITS specific DeviceID, as the core ITS ignores dev. */
+ info->scratchpad[0].ul = dev_id;
+
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
+ return msi_info->ops->msi_prepare(domain->parent,
+ dev, nvec, info);
+}
+
+static struct msi_domain_ops its_pmsi_ops = {
+ .msi_prepare = its_pmsi_prepare,
+};
+
+static struct msi_domain_info its_pmsi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &its_pmsi_ops,
+ .chip = &its_pmsi_irq_chip,
+};
+
+static const struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+static int __init its_pmsi_init_one(struct fwnode_handle *fwnode,
+ const char *name)
+{
+ struct irq_domain *parent;
+
+ parent = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ pr_err("%s: unable to locate ITS domain\n", name);
+ return -ENXIO;
+ }
+
+ if (!platform_msi_create_irq_domain(fwnode, &its_pmsi_domain_info,
+ parent)) {
+ pr_err("%s: unable to create platform domain\n", name);
+ return -ENXIO;
+ }
+
+ pr_info("Platform MSI: %s domain created\n", name);
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static int __init
+its_pmsi_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_translator *its_entry;
+ struct fwnode_handle *domain_handle;
+ const char *node_name;
+ int err = -ENXIO;
+
+ its_entry = (struct acpi_madt_generic_translator *)header;
+ node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
+ (long)its_entry->base_address);
+ domain_handle = iort_find_domain_token(its_entry->translation_id);
+ if (!domain_handle) {
+ pr_err("%s: Unable to locate ITS domain handle\n", node_name);
+ goto out;
+ }
+
+ err = its_pmsi_init_one(domain_handle, node_name);
+
+out:
+ kfree(node_name);
+ return err;
+}
+
+static void __init its_pmsi_acpi_init(void)
+{
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ its_pmsi_parse_madt, 0);
+}
+#else
+static inline void its_pmsi_acpi_init(void) { }
+#endif
+
+static void __init its_pmsi_of_init(void)
+{
+ struct device_node *np;
+
+ for (np = of_find_matching_node(NULL, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ if (!of_device_is_available(np))
+ continue;
+ if (!of_property_read_bool(np, "msi-controller"))
+ continue;
+
+ its_pmsi_init_one(of_node_to_fwnode(np), np->full_name);
+ }
+}
+
+static int __init its_pmsi_init(void)
+{
+ its_pmsi_of_init();
+ its_pmsi_acpi_init();
+ return 0;
+}
+early_initcall(its_pmsi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
new file mode 100644
index 000000000..895688150
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -0,0 +1,5619 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/cpu.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/efi.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/arm-gic-v4.h>
+
+#include <asm/cputype.h>
+#include <asm/exception.h>
+
+#include "irq-gic-common.h"
+
+#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
+
+#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
+#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
+
+#define RD_LOCAL_LPI_ENABLED BIT(0)
+#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
+#define RD_LOCAL_MEMRESERVE_DONE BIT(2)
+
+static u32 lpi_id_bits;
+
+/*
+ * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_NRBITS lpi_id_bits
+#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
+#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
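+
+/*
+ * Worked example (assuming lpi_id_bits = 16, a typical value): PROPBASE
+ * needs 2^16 = 65536 configuration bytes, i.e. LPI_PROPBASE_SZ = 64K,
+ * and PENDBASE needs 65536 / 8 = 8192 bytes, rounded up to 64K.
+ */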
+
+#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
+
+/*
+ * Collection structure - just an ID, and a redistributor address to
+ * ping. We use one per CPU as a bag of interrupts assigned to this
+ * CPU.
+ */
+struct its_collection {
+ u64 target_address;
+ u16 col_id;
+};
+
+/*
+ * The ITS_BASER structure - contains memory information, cached
+ * value of BASER register configuration and ITS page size.
+ */
+struct its_baser {
+ void *base;
+ u64 val;
+ u32 order;
+ u32 psz;
+};
+
+struct its_device;
+
+/*
+ * The ITS structure - contains most of the infrastructure, with the
+ * top-level MSI domain, the command queue, the collections, and the
+ * list of devices writing to it.
+ *
+ * dev_alloc_lock has to be taken for device allocations, while the
+ * spinlock must be taken to parse data structures such as the device
+ * list.
+ */
+struct its_node {
+ raw_spinlock_t lock;
+ struct mutex dev_alloc_lock;
+ struct list_head entry;
+ void __iomem *base;
+ void __iomem *sgir_base;
+ phys_addr_t phys_base;
+ struct its_cmd_block *cmd_base;
+ struct its_cmd_block *cmd_write;
+ struct its_baser tables[GITS_BASER_NR_REGS];
+ struct its_collection *collections;
+ struct fwnode_handle *fwnode_handle;
+ u64 (*get_msi_base)(struct its_device *its_dev);
+ u64 typer;
+ u64 cbaser_save;
+ u32 ctlr_save;
+ u32 mpidr;
+ struct list_head its_device_list;
+ u64 flags;
+ unsigned long list_nr;
+ int numa_node;
+ unsigned int msi_domain_flags;
+ u32 pre_its_base; /* for Socionext Synquacer */
+ int vlpi_redist_offset;
+};
+
+#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
+#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
+#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
+
+#define ITS_ITT_ALIGN SZ_256
+
+/* The maximum number of VPEID bits supported by VLPI commands */
+#define ITS_MAX_VPEID_BITS \
+ ({ \
+ int nvpeid = 16; \
+ if (gic_rdists->has_rvpeid && \
+ gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
+ nvpeid = 1 + (gic_rdists->gicd_typer2 & \
+ GICD_TYPER2_VID); \
+ \
+ nvpeid; \
+ })
+#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
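+
+/*
+ * Example (hypothetical values): without RVPEID the macro above yields
+ * 16 bits, i.e. ITS_MAX_VPEID = 65536 vPEs; with RVPEID and
+ * GICD_TYPER2.VID = 8 it yields 1 + 8 = 9 bits, i.e. 512 vPEs.
+ */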
+
+/* Convert page order to size in bytes */
+#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
+
+struct event_lpi_map {
+ unsigned long *lpi_map;
+ u16 *col_map;
+ irq_hw_number_t lpi_base;
+ int nr_lpis;
+ raw_spinlock_t vlpi_lock;
+ struct its_vm *vm;
+ struct its_vlpi_map *vlpi_maps;
+ int nr_vlpis;
+};
+
+/*
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts. If some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
+ */
+struct its_device {
+ struct list_head entry;
+ struct its_node *its;
+ struct event_lpi_map event_map;
+ void *itt;
+ u32 nr_ites;
+ u32 device_id;
+ bool shared;
+};
+
+static struct {
+ raw_spinlock_t lock;
+ struct its_device *dev;
+ struct its_vpe **vpes;
+ int next_victim;
+} vpe_proxy;
+
+struct cpu_lpi_count {
+ atomic_t managed;
+ atomic_t unmanaged;
+};
+
+static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
+
+static LIST_HEAD(its_nodes);
+static DEFINE_RAW_SPINLOCK(its_lock);
+static struct rdists *gic_rdists;
+static struct irq_domain *its_parent;
+
+static unsigned long its_list_map;
+static u16 vmovp_seq_num;
+static DEFINE_RAW_SPINLOCK(vmovp_lock);
+
+static DEFINE_IDA(its_vpeid_ida);
+
+#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+
+/*
+ * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
+ * always have vSGIs mapped.
+ */
+static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
+{
+ return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
+}
+
+static u16 get_its_list(struct its_vm *vm)
+{
+ struct its_node *its;
+ unsigned long its_list = 0;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!is_v4(its))
+ continue;
+
+ if (require_its_list_vmovp(vm, its))
+ __set_bit(its->list_nr, &its_list);
+ }
+
+ return (u16)its_list;
+}
+
+static inline u32 its_get_event_id(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ return d->hwirq - its_dev->event_map.lpi_base;
+}
+
+static struct its_collection *dev_event_to_col(struct its_device *its_dev,
+ u32 event)
+{
+ struct its_node *its = its_dev->its;
+
+ return its->collections + its_dev->event_map.col_map[event];
+}
+
+static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
+ u32 event)
+{
+ if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
+ return NULL;
+
+ return &its_dev->event_map.vlpi_maps[event];
+}
+
+static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
+{
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ return dev_event_to_vlpi_map(its_dev, event);
+ }
+
+ return NULL;
+}
+
+static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
+{
+ raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
+ return vpe->col_idx;
+}
+
+static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
+}
+
+static struct irq_chip its_vpe_irq_chip;
+
+static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
+{
+ struct its_vpe *vpe = NULL;
+ int cpu;
+
+ if (d->chip == &its_vpe_irq_chip) {
+ vpe = irq_data_get_irq_chip_data(d);
+ } else {
+ struct its_vlpi_map *map = get_vlpi_map(d);
+ if (map)
+ vpe = map->vpe;
+ }
+
+ if (vpe) {
+ cpu = vpe_to_cpuid_lock(vpe, flags);
+ } else {
+ /* Physical LPIs are already locked via the irq_desc lock */
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ cpu = its_dev->event_map.col_map[its_get_event_id(d)];
+ /* Keep GCC quiet... */
+ *flags = 0;
+ }
+
+ return cpu;
+}
+
+static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
+{
+ struct its_vpe *vpe = NULL;
+
+ if (d->chip == &its_vpe_irq_chip) {
+ vpe = irq_data_get_irq_chip_data(d);
+ } else {
+ struct its_vlpi_map *map = get_vlpi_map(d);
+ if (map)
+ vpe = map->vpe;
+ }
+
+ if (vpe)
+ vpe_to_cpuid_unlock(vpe, flags);
+}
+
+static struct its_collection *valid_col(struct its_collection *col)
+{
+ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
+ return NULL;
+
+ return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+ if (valid_col(its->collections + vpe->col_idx))
+ return vpe;
+
+ return NULL;
+}
+
+/*
+ * ITS command descriptors - parameters to be encoded in a command
+ * block.
+ */
+struct its_cmd_desc {
+ union {
+ struct {
+ struct its_device *dev;
+ u32 event_id;
+ } its_inv_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 event_id;
+ } its_clear_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 event_id;
+ } its_int_cmd;
+
+ struct {
+ struct its_device *dev;
+ int valid;
+ } its_mapd_cmd;
+
+ struct {
+ struct its_collection *col;
+ int valid;
+ } its_mapc_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 phys_id;
+ u32 event_id;
+ } its_mapti_cmd;
+
+ struct {
+ struct its_device *dev;
+ struct its_collection *col;
+ u32 event_id;
+ } its_movi_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 event_id;
+ } its_discard_cmd;
+
+ struct {
+ struct its_collection *col;
+ } its_invall_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ } its_vinvall_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ struct its_collection *col;
+ bool valid;
+ } its_vmapp_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ struct its_device *dev;
+ u32 virt_id;
+ u32 event_id;
+ bool db_enabled;
+ } its_vmapti_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ struct its_device *dev;
+ u32 event_id;
+ bool db_enabled;
+ } its_vmovi_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ struct its_collection *col;
+ u16 seq_num;
+ u16 its_list;
+ } its_vmovp_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ } its_invdb_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ u8 sgi;
+ u8 priority;
+ bool enable;
+ bool group;
+ bool clear;
+ } its_vsgi_cmd;
+ };
+};
+
+/*
+ * The ITS command block, which is what the ITS actually parses.
+ */
+struct its_cmd_block {
+ union {
+ u64 raw_cmd[4];
+ __le64 raw_cmd_le[4];
+ };
+};
+
+#define ITS_CMD_QUEUE_SZ SZ_64K
+#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
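+
+/*
+ * Example: a command block is 4 x 8 = 32 bytes, so the 64K command
+ * queue holds ITS_CMD_QUEUE_NR_ENTRIES = 65536 / 32 = 2048 commands.
+ */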
+
+typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
+ struct its_cmd_block *,
+ struct its_cmd_desc *);
+
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
+ struct its_cmd_block *,
+ struct its_cmd_desc *);
+
+static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
+{
+ u64 mask = GENMASK_ULL(h, l);
+ *raw_cmd &= ~mask;
+ *raw_cmd |= (val << l) & mask;
+}
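+
+/*
+ * Example (illustrative): its_encode_devid(cmd, 0x1234) below expands to
+ * its_mask_encode(&cmd->raw_cmd[0], 0x1234, 63, 32): bits [63:32] of the
+ * first command word are cleared and replaced with 0x1234, leaving the
+ * opcode in bits [7:0] untouched.
+ */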
+
+static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
+{
+ its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
+}
+
+static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
+{
+ its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
+}
+
+static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
+{
+ its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
+}
+
+static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
+{
+ its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
+}
+
+static void its_encode_size(struct its_cmd_block *cmd, u8 size)
+{
+ its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
+}
+
+static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
+{
+ its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
+}
+
+static void its_encode_valid(struct its_cmd_block *cmd, int valid)
+{
+ its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
+}
+
+static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
+{
+ its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
+}
+
+static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
+{
+ its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
+}
+
+static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
+{
+ its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
+}
+
+static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
+{
+ its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
+}
+
+static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
+{
+ its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
+}
+
+static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
+{
+ its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
+}
+
+static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
+{
+ its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
+}
+
+static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
+{
+ its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
+}
+
+static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
+{
+ its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
+}
+
+static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
+{
+ its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
+}
+
+static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
+{
+ its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
+}
+
+static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
+{
+ its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
+}
+
+static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
+{
+ its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
+}
+
+static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
+ u32 vpe_db_lpi)
+{
+ its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
+}
+
+static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
+ u32 vpe_db_lpi)
+{
+ its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
+}
+
+static void its_encode_db(struct its_cmd_block *cmd, bool db)
+{
+ its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
+}
+
+static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
+{
+ its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
+}
+
+static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
+{
+ its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
+}
+
+static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
+{
+ its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
+}
+
+static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
+{
+ its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
+}
+
+static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
+{
+ its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
+}
+
+static inline void its_fixup_cmd(struct its_cmd_block *cmd)
+{
+ /* Let's fixup BE commands */
+ cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
+ cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
+ cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
+ cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
+}
+
+static struct its_collection *its_build_mapd_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ unsigned long itt_addr;
+ u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
+
+ itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
+ itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
+
+ its_encode_cmd(cmd, GITS_CMD_MAPD);
+ its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
+ its_encode_size(cmd, size - 1);
+ its_encode_itt(cmd, itt_addr);
+ its_encode_valid(cmd, desc->its_mapd_cmd.valid);
+
+ its_fixup_cmd(cmd);
+
+ return NULL;
+}
+
+static struct its_collection *its_build_mapc_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_MAPC);
+ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+ its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
+ its_encode_valid(cmd, desc->its_mapc_cmd.valid);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_mapc_cmd.col;
+}
+
+static struct its_collection *its_build_mapti_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_mapti_cmd.dev,
+ desc->its_mapti_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_MAPTI);
+ its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
+ its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
+ its_encode_collection(cmd, col->col_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_col(col);
+}
+
+static struct its_collection *its_build_movi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_movi_cmd.dev,
+ desc->its_movi_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_MOVI);
+ its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
+ its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_col(col);
+}
+
+static struct its_collection *its_build_discard_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_discard_cmd.dev,
+ desc->its_discard_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_DISCARD);
+ its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_col(col);
+}
+
+static struct its_collection *its_build_inv_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_inv_cmd.dev,
+ desc->its_inv_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INV);
+ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_col(col);
+}
+
+static struct its_collection *its_build_int_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_int_cmd.dev,
+ desc->its_int_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INT);
+ its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_col(col);
+}
+
+static struct its_collection *its_build_clear_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_clear_cmd.dev,
+ desc->its_clear_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_CLEAR);
+ its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_col(col);
+}
+
+static struct its_collection *its_build_invall_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_INVALL);
+ its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_invall_cmd.col;
+}
+
+static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_VINVALL);
+ its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_vinvall_cmd.vpe);
+}
+
+static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ unsigned long vpt_addr, vconf_addr;
+ u64 target;
+ bool alloc;
+
+ its_encode_cmd(cmd, GITS_CMD_VMAPP);
+ its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
+ its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+
+ if (!desc->its_vmapp_cmd.valid) {
+ if (is_v4_1(its)) {
+ alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
+ its_encode_alloc(cmd, alloc);
+ }
+
+ goto out;
+ }
+
+ vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+ target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+
+ its_encode_target(cmd, target);
+ its_encode_vpt_addr(cmd, vpt_addr);
+ its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+
+ if (!is_v4_1(its))
+ goto out;
+
+ vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
+
+ alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
+ its_encode_alloc(cmd, alloc);
+
+ /*
+ * GICv4.1 provides a way to get the VLPI state, which needs the vPE
+ * to be unmapped first, and in this case, we may remap the vPE
+ * back while the VPT is not empty. So we can't assume that the
+ * VPT is empty on map. This is why we never advertise PTZ.
+ */
+ its_encode_ptz(cmd, false);
+ its_encode_vconf_addr(cmd, vconf_addr);
+ its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
+
+out:
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_vmapp_cmd.vpe);
+}
+
+static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ u32 db;
+
+ if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
+ db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
+ else
+ db = 1023;
+
+ its_encode_cmd(cmd, GITS_CMD_VMAPTI);
+ its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
+ its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
+ its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
+ its_encode_db_phys_id(cmd, db);
+ its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_vmapti_cmd.vpe);
+}
+
+static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ u32 db;
+
+ if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
+ db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
+ else
+ db = 1023;
+
+ its_encode_cmd(cmd, GITS_CMD_VMOVI);
+ its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
+ its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
+ its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
+ its_encode_db_phys_id(cmd, db);
+ its_encode_db_valid(cmd, true);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_vmovi_cmd.vpe);
+}
+
+static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ u64 target;
+
+ target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
+ its_encode_cmd(cmd, GITS_CMD_VMOVP);
+ its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
+ its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
+ its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
+ its_encode_target(cmd, target);
+
+ if (is_v4_1(its)) {
+ its_encode_db(cmd, true);
+ its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
+ }
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_vmovp_cmd.vpe);
+}
+
+static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
+ desc->its_inv_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INV);
+ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
+static struct its_vpe *its_build_vint_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
+ desc->its_int_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INT);
+ its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
+static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
+ desc->its_clear_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_CLEAR);
+ its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
+static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ if (WARN_ON(!is_v4_1(its)))
+ return NULL;
+
+ its_encode_cmd(cmd, GITS_CMD_INVDB);
+ its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_invdb_cmd.vpe);
+}
+
+static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ if (WARN_ON(!is_v4_1(its)))
+ return NULL;
+
+ its_encode_cmd(cmd, GITS_CMD_VSGI);
+ its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
+ its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
+ its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
+ its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
+ its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
+ its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_vsgi_cmd.vpe);
+}
+
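+/* Byte offset of @ptr from the start of the command queue */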
+static u64 its_cmd_ptr_to_offset(struct its_node *its,
+ struct its_cmd_block *ptr)
+{
+ return (ptr - its->cmd_base) * sizeof(*ptr);
+}
+
+static int its_queue_full(struct its_node *its)
+{
+ int widx;
+ int ridx;
+
+ widx = its->cmd_write - its->cmd_base;
+ ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
+
+ /* This is incredibly unlikely to happen, unless the ITS locks up. */
+ if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
+ return 1;
+
+ return 0;
+}
+
+static struct its_cmd_block *its_allocate_entry(struct its_node *its)
+{
+ struct its_cmd_block *cmd;
+ u32 count = 1000000; /* 1s! */
+
+ while (its_queue_full(its)) {
+ count--;
+ if (!count) {
+ pr_err_ratelimited("ITS queue not draining\n");
+ return NULL;
+ }
+ cpu_relax();
+ udelay(1);
+ }
+
+ cmd = its->cmd_write++;
+
+ /* Handle queue wrapping */
+ if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
+ its->cmd_write = its->cmd_base;
+
+ /* Clear command */
+ cmd->raw_cmd[0] = 0;
+ cmd->raw_cmd[1] = 0;
+ cmd->raw_cmd[2] = 0;
+ cmd->raw_cmd[3] = 0;
+
+ return cmd;
+}
+
+static struct its_cmd_block *its_post_commands(struct its_node *its)
+{
+ u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
+
+ writel_relaxed(wr, its->base + GITS_CWRITER);
+
+ return its->cmd_write;
+}
+
+static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
+{
+ /*
+ * Make sure the commands written to memory are observable by
+ * the ITS.
+ */
+ if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
+ gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
+ else
+ dsb(ishst);
+}
+
+static int its_wait_for_range_completion(struct its_node *its,
+ u64 prev_idx,
+ struct its_cmd_block *to)
+{
+ u64 rd_idx, to_idx, linear_idx;
+ u32 count = 1000000; /* 1s! */
+
+ /* Linearize to_idx if the command set has wrapped around */
+ to_idx = its_cmd_ptr_to_offset(its, to);
+ if (to_idx < prev_idx)
+ to_idx += ITS_CMD_QUEUE_SZ;
+
+ linear_idx = prev_idx;
+
+ while (1) {
+ s64 delta;
+
+ rd_idx = readl_relaxed(its->base + GITS_CREADR);
+
+ /*
+ * Compute the read pointer progress, taking the
+ * potential wrap-around into account.
+ */
+ delta = rd_idx - prev_idx;
+ if (rd_idx < prev_idx)
+ delta += ITS_CMD_QUEUE_SZ;
+
+ linear_idx += delta;
+ if (linear_idx >= to_idx)
+ break;
+
+ count--;
+ if (!count) {
+ pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
+ to_idx, linear_idx);
+ return -1;
+ }
+ prev_idx = rd_idx;
+ cpu_relax();
+ udelay(1);
+ }
+
+ return 0;
+}
+
+/* Warning, macro hell follows */
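+/*
+ * BUILD_SINGLE_CMD_FUNC() expands to a function that takes the command
+ * queue lock, enqueues the command produced by the builder, chases it
+ * with a SYNC/VSYNC aimed at whatever object the builder returned, and
+ * then waits for the ITS to consume both entries.
+ */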
+#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
+void name(struct its_node *its, \
+ buildtype builder, \
+ struct its_cmd_desc *desc) \
+{ \
+ struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
+ synctype *sync_obj; \
+ unsigned long flags; \
+ u64 rd_idx; \
+ \
+ raw_spin_lock_irqsave(&its->lock, flags); \
+ \
+ cmd = its_allocate_entry(its); \
+ if (!cmd) { /* We're soooooo screwed... */ \
+ raw_spin_unlock_irqrestore(&its->lock, flags); \
+ return; \
+ } \
+ sync_obj = builder(its, cmd, desc); \
+ its_flush_cmd(its, cmd); \
+ \
+ if (sync_obj) { \
+ sync_cmd = its_allocate_entry(its); \
+ if (!sync_cmd) \
+ goto post; \
+ \
+ buildfn(its, sync_cmd, sync_obj); \
+ its_flush_cmd(its, sync_cmd); \
+ } \
+ \
+post: \
+ rd_idx = readl_relaxed(its->base + GITS_CREADR); \
+ next_cmd = its_post_commands(its); \
+ raw_spin_unlock_irqrestore(&its->lock, flags); \
+ \
+ if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
+ pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
+}
+
+static void its_build_sync_cmd(struct its_node *its,
+ struct its_cmd_block *sync_cmd,
+ struct its_collection *sync_col)
+{
+ its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
+ its_encode_target(sync_cmd, sync_col->target_address);
+
+ its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
+ struct its_collection, its_build_sync_cmd)
+
+static void its_build_vsync_cmd(struct its_node *its,
+ struct its_cmd_block *sync_cmd,
+ struct its_vpe *sync_vpe)
+{
+ its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
+ its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
+
+ its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
+ struct its_vpe, its_build_vsync_cmd)
+
+static void its_send_int(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_int_cmd.dev = dev;
+ desc.its_int_cmd.event_id = event_id;
+
+ its_send_single_command(dev->its, its_build_int_cmd, &desc);
+}
+
+static void its_send_clear(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_clear_cmd.dev = dev;
+ desc.its_clear_cmd.event_id = event_id;
+
+ its_send_single_command(dev->its, its_build_clear_cmd, &desc);
+}
+
+static void its_send_inv(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_inv_cmd.dev = dev;
+ desc.its_inv_cmd.event_id = event_id;
+
+ its_send_single_command(dev->its, its_build_inv_cmd, &desc);
+}
+
+static void its_send_mapd(struct its_device *dev, int valid)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_mapd_cmd.dev = dev;
+ desc.its_mapd_cmd.valid = !!valid;
+
+ its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
+}
+
+static void its_send_mapc(struct its_node *its, struct its_collection *col,
+ int valid)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_mapc_cmd.col = col;
+ desc.its_mapc_cmd.valid = !!valid;
+
+ its_send_single_command(its, its_build_mapc_cmd, &desc);
+}
+
+static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_mapti_cmd.dev = dev;
+ desc.its_mapti_cmd.phys_id = irq_id;
+ desc.its_mapti_cmd.event_id = id;
+
+ its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
+}
+
+static void its_send_movi(struct its_device *dev,
+ struct its_collection *col, u32 id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_movi_cmd.dev = dev;
+ desc.its_movi_cmd.col = col;
+ desc.its_movi_cmd.event_id = id;
+
+ its_send_single_command(dev->its, its_build_movi_cmd, &desc);
+}
+
+static void its_send_discard(struct its_device *dev, u32 id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_discard_cmd.dev = dev;
+ desc.its_discard_cmd.event_id = id;
+
+ its_send_single_command(dev->its, its_build_discard_cmd, &desc);
+}
+
+static void its_send_invall(struct its_node *its, struct its_collection *col)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_invall_cmd.col = col;
+
+ its_send_single_command(its, its_build_invall_cmd, &desc);
+}
+
+static void its_send_vmapti(struct its_device *dev, u32 id)
+{
+ struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
+ struct its_cmd_desc desc;
+
+ desc.its_vmapti_cmd.vpe = map->vpe;
+ desc.its_vmapti_cmd.dev = dev;
+ desc.its_vmapti_cmd.virt_id = map->vintid;
+ desc.its_vmapti_cmd.event_id = id;
+ desc.its_vmapti_cmd.db_enabled = map->db_enabled;
+
+ its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
+}
+
+static void its_send_vmovi(struct its_device *dev, u32 id)
+{
+ struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
+ struct its_cmd_desc desc;
+
+ desc.its_vmovi_cmd.vpe = map->vpe;
+ desc.its_vmovi_cmd.dev = dev;
+ desc.its_vmovi_cmd.event_id = id;
+ desc.its_vmovi_cmd.db_enabled = map->db_enabled;
+
+ its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
+}
+
+static void its_send_vmapp(struct its_node *its,
+ struct its_vpe *vpe, bool valid)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_vmapp_cmd.vpe = vpe;
+ desc.its_vmapp_cmd.valid = valid;
+ desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
+
+ its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
+}
+
+static void its_send_vmovp(struct its_vpe *vpe)
+{
+ struct its_cmd_desc desc = {};
+ struct its_node *its;
+ unsigned long flags;
+ int col_id = vpe->col_idx;
+
+ desc.its_vmovp_cmd.vpe = vpe;
+
+ if (!its_list_map) {
+ its = list_first_entry(&its_nodes, struct its_node, entry);
+ desc.its_vmovp_cmd.col = &its->collections[col_id];
+ its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+ return;
+ }
+
+ /*
+ * Yet another marvel of the architecture. If using the
+ * its_list "feature", we need to make sure that all ITSs
+ * receive all VMOVP commands in the same order. The only way
+ * to guarantee this is to make vmovp a serialization point.
+ *
+ * Wall <-- Head.
+ */
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+ desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
+
+ /* Emit VMOVPs */
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!is_v4(its))
+ continue;
+
+ if (!require_its_list_vmovp(vpe->its_vm, its))
+ continue;
+
+ desc.its_vmovp_cmd.col = &its->collections[col_id];
+ its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_vinvall_cmd.vpe = vpe;
+ its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
+}
+
+static void its_send_vinv(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VINV command. This is just a normal INV,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_inv_cmd.dev = dev;
+ desc.its_inv_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
+}
+
+static void its_send_vint(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VINT command. This is just a normal INT,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_int_cmd.dev = dev;
+ desc.its_int_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
+}
+
+static void its_send_vclear(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VCLEAR command. This is just a normal CLEAR,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_clear_cmd.dev = dev;
+ desc.its_clear_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
+}
+
+static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_invdb_cmd.vpe = vpe;
+ its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
+}
+
+/*
+ * irqchip functions - assumes MSI, mostly.
+ */
+static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
+{
+ struct its_vlpi_map *map = get_vlpi_map(d);
+ irq_hw_number_t hwirq;
+ void *va;
+ u8 *cfg;
+
+ if (map) {
+ va = page_address(map->vm->vprop_page);
+ hwirq = map->vintid;
+
+ /* Remember the updated property */
+ map->properties &= ~clr;
+ map->properties |= set | LPI_PROP_GROUP1;
+ } else {
+ va = gic_rdists->prop_table_va;
+ hwirq = d->hwirq;
+ }
+
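+ /* LPI INTIDs start at 8192, so index the configuration table from there */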
+ cfg = va + hwirq - 8192;
+ *cfg &= ~clr;
+ *cfg |= set | LPI_PROP_GROUP1;
+
+ /*
+ * Make the above write visible to the redistributors.
+ * And yes, we're flushing exactly: One. Single. Byte.
+ * Humpf...
+ */
+ if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
+ gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
+ else
+ dsb(ishst);
+}
+
+static void wait_for_syncr(void __iomem *rdbase)
+{
+ while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
+ cpu_relax();
+}
+
+static void __direct_lpi_inv(struct irq_data *d, u64 val)
+{
+ void __iomem *rdbase;
+ unsigned long flags;
+ int cpu;
+
+ /* Target the redistributor this LPI is currently routed to */
+ cpu = irq_to_cpuid_lock(d, &flags);
+ raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+
+ rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVLPIR);
+ wait_for_syncr(rdbase);
+
+ raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ irq_to_cpuid_unlock(d, flags);
+}
+
+static void direct_lpi_inv(struct irq_data *d)
+{
+ struct its_vlpi_map *map = get_vlpi_map(d);
+ u64 val;
+
+ if (map) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ WARN_ON(!is_v4_1(its_dev->its));
+
+ val = GICR_INVLPIR_V;
+ val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
+ val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
+ } else {
+ val = d->hwirq;
+ }
+
+ __direct_lpi_inv(d, val);
+}
+
+static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ lpi_write_config(d, clr, set);
+ if (gic_rdists->has_direct_lpi &&
+ (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
+ direct_lpi_inv(d);
+ else if (!irqd_is_forwarded_to_vcpu(d))
+ its_send_inv(its_dev, its_get_event_id(d));
+ else
+ its_send_vinv(its_dev, its_get_event_id(d));
+}
+
+static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
+
+ /*
+ * GICv4.1 does away with the per-LPI nonsense, nothing to do
+ * here.
+ */
+ if (is_v4_1(its_dev->its))
+ return;
+
+ map = dev_event_to_vlpi_map(its_dev, event);
+
+ if (map->db_enabled == enable)
+ return;
+
+ map->db_enabled = enable;
+
+ /*
+ * More fun with the architecture:
+ *
+ * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
+ * value or to 1023, depending on the enable bit. But that
+ * would be issuing a mapping for an /existing/ DevID+EventID
+ * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
+ * to the /same/ vPE, using this opportunity to adjust the
+ * doorbell. Mouahahahaha. We loves it, Precious.
+ */
+ its_send_vmovi(its_dev, event);
+}
+
+static void its_mask_irq(struct irq_data *d)
+{
+ if (irqd_is_forwarded_to_vcpu(d))
+ its_vlpi_set_doorbell(d, false);
+
+ lpi_update_config(d, LPI_PROP_ENABLED, 0);
+}
+
+static void its_unmask_irq(struct irq_data *d)
+{
+ if (irqd_is_forwarded_to_vcpu(d))
+ its_vlpi_set_doorbell(d, true);
+
+ lpi_update_config(d, 0, LPI_PROP_ENABLED);
+}
+
+static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+
+ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_inc_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+ else
+ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_dec_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+ else
+ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
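+/*
+ * Pick the CPU in @cpu_mask that currently services the fewest LPIs of
+ * the same kind (managed vs unmanaged) as this interrupt.
+ */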
+static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
+ const struct cpumask *cpu_mask)
+{
+ unsigned int cpu = nr_cpu_ids, tmp;
+ int count = S32_MAX;
+
+ for_each_cpu(tmp, cpu_mask) {
+ int this_count = its_read_lpi_count(d, tmp);
+ if (this_count < count) {
+ cpu = tmp;
+ count = this_count;
+ }
+ }
+
+ return cpu;
+}
+
+/*
+ * As suggested by Thomas Gleixner in:
+ * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
+ */
+static int its_select_cpu(struct irq_data *d,
+ const struct cpumask *aff_mask)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ static DEFINE_RAW_SPINLOCK(tmpmask_lock);
+ static struct cpumask __tmpmask;
+ struct cpumask *tmpmask;
+ unsigned long flags;
+ int cpu, node;
+ node = its_dev->its->numa_node;
+ tmpmask = &__tmpmask;
+
+ raw_spin_lock_irqsave(&tmpmask_lock, flags);
+
+ if (!irqd_affinity_is_managed(d)) {
+ /* First try the NUMA node */
+ if (node != NUMA_NO_NODE) {
+ /*
+ * Try the intersection of the affinity mask and the
+ * node mask (and the online mask, just to be safe).
+ */
+ cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
+ cpumask_and(tmpmask, tmpmask, cpu_online_mask);
+
+ /*
+ * Ideally, we would check if the mask is empty, and
+ * try again on the full node here.
+ *
+ * But it turns out that the way ACPI describes the
+ * affinity for ITSs only deals with memory, and
+ * not target CPUs, so it cannot describe a single
+ * ITS placed next to two NUMA nodes.
+ *
+ * Instead, just fallback on the online mask. This
+ * diverges from Thomas' suggestion above.
+ */
+ cpu = cpumask_pick_least_loaded(d, tmpmask);
+ if (cpu < nr_cpu_ids)
+ goto out;
+
+ /* If we can't cross sockets, give up */
+ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
+ goto out;
+
+ /* If the above failed, expand the search */
+ }
+
+ /* Try the intersection of the affinity and online masks */
+ cpumask_and(tmpmask, aff_mask, cpu_online_mask);
+
+ /* If that doesn't fly, the online mask is the last resort */
+ if (cpumask_empty(tmpmask))
+ cpumask_copy(tmpmask, cpu_online_mask);
+
+ cpu = cpumask_pick_least_loaded(d, tmpmask);
+ } else {
+ cpumask_copy(tmpmask, aff_mask);
+
+ /* If we cannot cross sockets, limit the search to that node */
+ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
+ node != NUMA_NO_NODE)
+ cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
+
+ cpu = cpumask_pick_least_loaded(d, tmpmask);
+ }
+out:
+ raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
+
+ pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
+ return cpu;
+}
+
+static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_collection *target_col;
+ u32 id = its_get_event_id(d);
+ int cpu, prev_cpu;
+
+ /* A forwarded interrupt should use irq_set_vcpu_affinity */
+ if (irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
+
+ prev_cpu = its_dev->event_map.col_map[id];
+ its_dec_lpi_count(d, prev_cpu);
+
+ if (!force)
+ cpu = its_select_cpu(d, mask_val);
+ else
+ cpu = cpumask_pick_least_loaded(d, mask_val);
+
+ if (cpu < 0 || cpu >= nr_cpu_ids)
+ goto err;
+
+ /* don't set the affinity when the target cpu is the same as the current one */
+ if (cpu != prev_cpu) {
+ target_col = &its_dev->its->collections[cpu];
+ its_send_movi(its_dev, target_col, id);
+ its_dev->event_map.col_map[id] = cpu;
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ }
+
+ its_inc_lpi_count(d, cpu);
+
+ return IRQ_SET_MASK_OK_DONE;
+
+err:
+ its_inc_lpi_count(d, prev_cpu);
+ return -EINVAL;
+}
+
+static u64 its_irq_get_msi_base(struct its_device *its_dev)
+{
+ struct its_node *its = its_dev->its;
+
+ return its->phys_base + GITS_TRANSLATER;
+}
+
+static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+ u64 addr;
+
+ its = its_dev->its;
+ addr = its->get_msi_base(its_dev);
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = its_get_event_id(d);
+
+ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
+}
+
+static int its_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ if (state)
+ its_send_vint(its_dev, event);
+ else
+ its_send_vclear(its_dev, event);
+ } else {
+ if (state)
+ its_send_int(its_dev, event);
+ else
+ its_send_clear(its_dev, event);
+ }
+
+ return 0;
+}
+
+static int its_irq_retrigger(struct irq_data *d)
+{
+ return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
+}
+
+/*
+ * Two favourable cases:
+ *
+ * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
+ * for vSGI delivery
+ *
+ * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
+ * and we're better off mapping all VPEs always
+ *
+ * If neither (a) nor (b) is true, then we map vPEs on demand.
+ *
+ */
+static bool gic_requires_eager_mapping(void)
+{
+ if (!its_list_map || gic_rdists->has_rvpeid)
+ return true;
+
+ return false;
+}
+
+static void its_map_vm(struct its_node *its, struct its_vm *vm)
+{
+ unsigned long flags;
+
+ if (gic_requires_eager_mapping())
+ return;
+
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ /*
+ * If the VM wasn't mapped yet, iterate over the vpes and get
+ * them mapped now.
+ */
+ vm->vlpi_count[its->list_nr]++;
+
+ if (vm->vlpi_count[its->list_nr] == 1) {
+ int i;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ struct its_vpe *vpe = vm->vpes[i];
+ struct irq_data *d = irq_get_irq_data(vpe->irq);
+
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+ its_send_vmapp(its, vpe, true);
+ its_send_vinvall(its, vpe);
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
+{
+ unsigned long flags;
+
+ /* Not using the ITS list? Everything is always mapped. */
+ if (gic_requires_eager_mapping())
+ return;
+
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ if (!--vm->vlpi_count[its->list_nr]) {
+ int i;
+
+ for (i = 0; i < vm->nr_vpes; i++)
+ its_send_vmapp(its, vm->vpes[i], false);
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ int ret = 0;
+
+ if (!info->map)
+ return -EINVAL;
+
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
+
+ if (!its_dev->event_map.vm) {
+ struct its_vlpi_map *maps;
+
+ maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
+ GFP_ATOMIC);
+ if (!maps) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ its_dev->event_map.vm = info->map->vm;
+ its_dev->event_map.vlpi_maps = maps;
+ } else if (its_dev->event_map.vm != info->map->vm) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get our private copy of the mapping information */
+ its_dev->event_map.vlpi_maps[event] = *info->map;
+
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ /* Already mapped, move it around */
+ its_send_vmovi(its_dev, event);
+ } else {
+ /* Ensure all the VPEs are mapped on this ITS */
+ its_map_vm(its_dev->its, info->map->vm);
+
+ /*
+ * Flag the interrupt as forwarded so that we can
+ * start poking the virtual property table.
+ */
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Write out the property to the prop table */
+ lpi_write_config(d, 0xff, info->map->properties);
+
+ /* Drop the physical mapping */
+ its_send_discard(its_dev, event);
+
+ /* and install the virtual one */
+ its_send_vmapti(its_dev, event);
+
+ /* Increment the number of VLPIs */
+ its_dev->event_map.nr_vlpis++;
+ }
+
+out:
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+ return ret;
+}
+
+static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_vlpi_map *map;
+ int ret = 0;
+
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
+
+ map = get_vlpi_map(d);
+
+ if (!its_dev->event_map.vm || !map) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Copy our mapping information to the incoming request */
+ *info->map = *map;
+
+out:
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+ return ret;
+}
+
+static int its_vlpi_unmap(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ int ret = 0;
+
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
+
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Drop the virtual mapping */
+ its_send_discard(its_dev, event);
+
+ /* and restore the physical one */
+ irqd_clr_forwarded_to_vcpu(d);
+ its_send_mapti(its_dev, d->hwirq, event);
+ lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+ LPI_PROP_ENABLED |
+ LPI_PROP_GROUP1));
+
+ /* Potentially unmap the VM from this ITS */
+ its_unmap_vm(its_dev->its, its_dev->event_map.vm);
+
+ /*
+ * Drop the refcount and make the device available again if
+ * this was the last VLPI.
+ */
+ if (!--its_dev->event_map.nr_vlpis) {
+ its_dev->event_map.vm = NULL;
+ kfree(its_dev->event_map.vlpi_maps);
+ }
+
+out:
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+ return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
+
+ if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+ lpi_update_config(d, 0xff, info->config);
+ else
+ lpi_write_config(d, 0xff, info->config);
+ its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+ return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ /* Need a v4 ITS */
+ if (!is_v4(its_dev->its))
+ return -EINVAL;
+
+ /* Unmap request? */
+ if (!info)
+ return its_vlpi_unmap(d);
+
+ switch (info->cmd_type) {
+ case MAP_VLPI:
+ return its_vlpi_map(d, info);
+
+ case GET_VLPI:
+ return its_vlpi_get(d, info);
+
+ case PROP_UPDATE_VLPI:
+ case PROP_UPDATE_AND_INV_VLPI:
+ return its_vlpi_prop_update(d, info);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip its_irq_chip = {
+ .name = "ITS",
+ .irq_mask = its_mask_irq,
+ .irq_unmask = its_unmask_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = its_set_affinity,
+ .irq_compose_msi_msg = its_irq_compose_msi_msg,
+ .irq_set_irqchip_state = its_irq_set_irqchip_state,
+ .irq_retrigger = its_irq_retrigger,
+ .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
+};
+
+
+/*
+ * How we allocate LPIs:
+ *
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
+ *
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
+ */
+#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
+
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+ struct list_head entry;
+ u32 base_id;
+ u32 span;
+};
+
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
+{
+ struct lpi_range *range;
+
+ range = kmalloc(sizeof(*range), GFP_KERNEL);
+ if (range) {
+ range->base_id = base;
+ range->span = span;
+ }
+
+ return range;
+}
+
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+ struct lpi_range *range, *tmp;
+ int err = -ENOSPC;
+
+ mutex_lock(&lpi_range_lock);
+
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (range->span >= nr_lpis) {
+ *base = range->base_id;
+ range->base_id += nr_lpis;
+ range->span -= nr_lpis;
+
+ if (range->span == 0) {
+ list_del(&range->entry);
+ kfree(range);
+ }
+
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&lpi_range_lock);
+
+ pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+ return err;
+}
+
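+/* If @a immediately precedes @b, fold @a into @b and free it */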
+static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
+{
+ if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
+ return;
+ if (a->base_id + a->span != b->base_id)
+ return;
+ b->base_id = a->base_id;
+ b->span += a->span;
+ list_del(&a->entry);
+ kfree(a);
+}
+
+static int free_lpi_range(u32 base, u32 nr_lpis)
+{
+ struct lpi_range *new, *old;
+
+ new = mk_lpi_range(base, nr_lpis);
+ if (!new)
+ return -ENOMEM;
+
+ mutex_lock(&lpi_range_lock);
+
+ list_for_each_entry_reverse(old, &lpi_range_list, entry) {
+ if (old->base_id < base)
+ break;
+ }
+ /*
+ * old is the last element with ->base_id smaller than base,
+ * so new goes right after it. If there are no elements with
+ * ->base_id smaller than base, &old->entry ends up pointing
+ * at the head of the list, and inserting new at the start of
+ * the list is the right thing to do in that case as well.
+ */
+ list_add(&new->entry, &old->entry);
+ /*
+ * Now check if we can merge with the preceding and/or
+ * following ranges.
+ */
+ merge_lpi_ranges(old, new);
+ merge_lpi_ranges(new, list_next_entry(new, entry));
+
+ mutex_unlock(&lpi_range_lock);
+ return 0;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+ u32 lpis = (1UL << id_bits) - 8192;
+ u32 numlpis;
+ int err;
+
+ numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+ if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+ lpis = numlpis;
+ pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+ lpis);
+ }
+
+ /*
+ * Initializing the allocator is just the same as freeing the
+ * full range of LPIs.
+ */
+ err = free_lpi_range(8192, lpis);
+ pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
+ return err;
+}
+
+static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
+{
+ unsigned long *bitmap = NULL;
+ int err = 0;
+
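+ /* Keep halving the request until the allocator can satisfy it */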
+ do {
+ err = alloc_lpi_range(nr_irqs, base);
+ if (!err)
+ break;
+
+ nr_irqs /= 2;
+ } while (nr_irqs > 0);
+
+ if (!nr_irqs)
+ err = -ENOSPC;
+
+ if (err)
+ goto out;
+
+ bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
+ if (!bitmap)
+ goto out;
+
+ *nr_ids = nr_irqs;
+
+out:
+ if (!bitmap)
+ *base = *nr_ids = 0;
+
+ return bitmap;
+}
+
+static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
+{
+ WARN_ON(free_lpi_range(base, nr_ids));
+ bitmap_free(bitmap);
+}
+
+static void gic_reset_prop_table(void *va)
+{
+ /* Priority 0xa0, Group-1, disabled */
+ memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
+
+ /* Make sure the GIC will observe the written configuration */
+ gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
+}
+
+static struct page *its_allocate_prop_table(gfp_t gfp_flags)
+{
+ struct page *prop_page;
+
+ prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
+ if (!prop_page)
+ return NULL;
+
+ gic_reset_prop_table(page_address(prop_page));
+
+ return prop_page;
+}
+
+static void its_free_prop_table(struct page *prop_page)
+{
+ free_pages((unsigned long)page_address(prop_page),
+ get_order(LPI_PROPBASE_SZ));
+}
+
+static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
+{
+ phys_addr_t start, end, addr_end;
+ u64 i;
+
+ /*
+ * We don't bother checking for a kdump kernel as by
+ * construction, the LPI tables are out of this kernel's
+ * memory map.
+ */
+ if (is_kdump_kernel())
+ return true;
+
+ addr_end = addr + size - 1;
+
+ for_each_reserved_mem_range(i, &start, &end) {
+ if (addr >= start && addr_end <= end)
+ return true;
+ }
+
+ /* Not found, not a good sign... */
+ pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
+ &addr, &addr_end);
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+ return false;
+}
+
+static int gic_reserve_range(phys_addr_t addr, unsigned long size)
+{
+ if (efi_enabled(EFI_CONFIG_TABLES))
+ return efi_mem_reserve_persistent(addr, size);
+
+ return 0;
+}
+
+static int __init its_setup_lpi_prop_table(void)
+{
+ if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
+ u64 val;
+
+ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
+ lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
+
+ gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
+ gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
+ LPI_PROPBASE_SZ,
+ MEMREMAP_WB);
+ gic_reset_prop_table(gic_rdists->prop_table_va);
+ } else {
+ struct page *page;
+
+ lpi_id_bits = min_t(u32,
+ GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+ ITS_MAX_LPI_NRBITS);
+ page = its_allocate_prop_table(GFP_NOWAIT);
+ if (!page) {
+ pr_err("Failed to allocate PROPBASE\n");
+ return -ENOMEM;
+ }
+
+ gic_rdists->prop_table_pa = page_to_phys(page);
+ gic_rdists->prop_table_va = page_address(page);
+ WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
+ LPI_PROPBASE_SZ));
+ }
+
+ pr_info("GICv3: using LPI property table @%pa\n",
+ &gic_rdists->prop_table_pa);
+
+ return its_lpi_init(lpi_id_bits);
+}
+
+static const char *its_base_type_string[] = {
+ [GITS_BASER_TYPE_DEVICE] = "Devices",
+ [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
+ [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
+ [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
+ [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
+ [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
+ [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
+};
+
+static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
+{
+ u32 idx = baser - its->tables;
+
+ return gits_read_baser(its->base + GITS_BASER + (idx << 3));
+}
+
+static void its_write_baser(struct its_node *its, struct its_baser *baser,
+ u64 val)
+{
+ u32 idx = baser - its->tables;
+
+ gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
+ baser->val = its_read_baser(its, baser);
+}
+
+static int its_setup_baser(struct its_node *its, struct its_baser *baser,
+ u64 cache, u64 shr, u32 order, bool indirect)
+{
+ u64 val = its_read_baser(its, baser);
+ u64 esz = GITS_BASER_ENTRY_SIZE(val);
+ u64 type = GITS_BASER_TYPE(val);
+ u64 baser_phys, tmp;
+ u32 alloc_pages, psz;
+ struct page *page;
+ void *base;
+
+ psz = baser->psz;
+ alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
+ if (alloc_pages > GITS_BASER_PAGES_MAX) {
+ pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
+ &its->phys_base, its_base_type_string[type],
+ alloc_pages, GITS_BASER_PAGES_MAX);
+ alloc_pages = GITS_BASER_PAGES_MAX;
+ order = get_order(GITS_BASER_PAGES_MAX * psz);
+ }
+
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return -ENOMEM;
+
+ base = (void *)page_address(page);
+ baser_phys = virt_to_phys(base);
+
+ /* Check if the physical address of the memory is above 48bits */
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
+
+ /* 52bit PA is supported only when PageSize=64K */
+ if (psz != SZ_64K) {
+ pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
+ free_pages((unsigned long)base, order);
+ return -ENXIO;
+ }
+
+ /* Convert 52bit PA to 48bit field */
+ baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
+ }
+
+retry_baser:
+ val = (baser_phys |
+ (type << GITS_BASER_TYPE_SHIFT) |
+ ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
+ ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
+ cache |
+ shr |
+ GITS_BASER_VALID);
+
+ val |= indirect ? GITS_BASER_INDIRECT : 0x0;
+
+ switch (psz) {
+ case SZ_4K:
+ val |= GITS_BASER_PAGE_SIZE_4K;
+ break;
+ case SZ_16K:
+ val |= GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_64K:
+ val |= GITS_BASER_PAGE_SIZE_64K;
+ break;
+ }
+
+ its_write_baser(its, baser, val);
+ tmp = baser->val;
+
+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+ /*
+ * Shareability didn't stick. Just use
+ * whatever the read reported, which is likely
+ * to be the only thing this ITS
+ * supports. If that's zero, make it
+ * non-cacheable as well.
+ */
+ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+ if (!shr) {
+ cache = GITS_BASER_nC;
+ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
+ }
+ goto retry_baser;
+ }
+
+ if (val != tmp) {
+ pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
+ &its->phys_base, its_base_type_string[type],
+ val, tmp);
+ free_pages((unsigned long)base, order);
+ return -ENXIO;
+ }
+
+ baser->order = order;
+ baser->base = base;
+ baser->psz = psz;
+ tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
+
+ pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
+ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
+ its_base_type_string[type],
+ (unsigned long)virt_to_phys(base),
+ indirect ? "indirect" : "flat", (int)esz,
+ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+
+ return 0;
+}
+
+static bool its_parse_indirect_baser(struct its_node *its,
+ struct its_baser *baser,
+ u32 *order, u32 ids)
+{
+ u64 tmp = its_read_baser(its, baser);
+ u64 type = GITS_BASER_TYPE(tmp);
+ u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
+ u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
+ u32 new_order = *order;
+ u32 psz = baser->psz;
+ bool indirect = false;
+
+ /* No need to enable Indirection if memory requirement < (psz*2)bytes */
+ if ((esz << ids) > (psz * 2)) {
+ /*
+ * Find out whether hw supports a single or two-level table by
+ * reading bit at offset '62' after writing '1' to it.
+ */
+ its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
+ indirect = !!(baser->val & GITS_BASER_INDIRECT);
+
+ if (indirect) {
+ /*
+ * The size of a lvl2 table is equal to the ITS page size,
+ * 'psz'. For the lvl1 table size, subtract from 'ids' (as
+ * reported by the ITS hardware) the ID bits that a single
+ * lvl2 table covers, and use the lvl1 entry size instead.
+ */
+ ids -= ilog2(psz / (int)esz);
+ esz = GITS_LVL1_ENTRY_SIZE;
+ }
+ }
+
+ /*
+ * Allocate as many entries as required to fit the
+ * range of device IDs that the ITS can grok... The ID
+ * space being incredibly sparse, this results in a
+ * massive waste of memory if the two-level device table
+ * feature is not supported by hardware.
+ */
+ new_order = max_t(u32, get_order(esz << ids), new_order);
+ if (new_order >= MAX_ORDER) {
+ new_order = MAX_ORDER - 1;
+ ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
+ pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
+ &its->phys_base, its_base_type_string[type],
+ device_ids(its), ids);
+ }
+
+ *order = new_order;
+
+ return indirect;
+}
+
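+/*
+ * Keep only the affinity levels shared by all redistributors that use a
+ * common LPI configuration, as advertised by GICR_TYPER.CommonLPIAff.
+ */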
+static u32 compute_common_aff(u64 val)
+{
+ u32 aff, clpiaff;
+
+ aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
+ clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
+
+ return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
+}
+
+static u32 compute_its_aff(struct its_node *its)
+{
+ u64 val;
+ u32 svpet;
+
+ /*
+ * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
+ * the resulting affinity. We then use that to see if this matches
+ * our own affinity.
+ */
+ svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
+ val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
+ val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
+ return compute_common_aff(val);
+}
+
+static struct its_node *find_sibling_its(struct its_node *cur_its)
+{
+ struct its_node *its;
+ u32 aff;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
+ return NULL;
+
+ aff = compute_its_aff(cur_its);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 baser;
+
+ if (!is_v4_1(its) || its == cur_its)
+ continue;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+ continue;
+
+ if (aff != compute_its_aff(its))
+ continue;
+
+ /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+ baser = its->tables[2].val;
+ if (!(baser & GITS_BASER_VALID))
+ continue;
+
+ return its;
+ }
+
+ return NULL;
+}
+
+static void its_free_tables(struct its_node *its)
+{
+ int i;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (its->tables[i].base) {
+ free_pages((unsigned long)its->tables[i].base,
+ its->tables[i].order);
+ its->tables[i].base = NULL;
+ }
+ }
+}
+
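+/* Probe the largest ITS page size this BASER accepts, trying 64K, then 16K, then 4K */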
+static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
+{
+ u64 psz = SZ_64K;
+
+ while (psz) {
+ u64 val, gpsz;
+
+ val = its_read_baser(its, baser);
+ val &= ~GITS_BASER_PAGE_SIZE_MASK;
+
+ switch (psz) {
+ case SZ_64K:
+ gpsz = GITS_BASER_PAGE_SIZE_64K;
+ break;
+ case SZ_16K:
+ gpsz = GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_4K:
+ default:
+ gpsz = GITS_BASER_PAGE_SIZE_4K;
+ break;
+ }
+
+ gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
+
+ val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
+ its_write_baser(its, baser, val);
+
+ if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
+ break;
+
+ switch (psz) {
+ case SZ_64K:
+ psz = SZ_16K;
+ break;
+ case SZ_16K:
+ psz = SZ_4K;
+ break;
+ case SZ_4K:
+ default:
+ return -1;
+ }
+ }
+
+ baser->psz = psz;
+ return 0;
+}
+
+static int its_alloc_tables(struct its_node *its)
+{
+ u64 shr = GITS_BASER_InnerShareable;
+ u64 cache = GITS_BASER_RaWaWb;
+ int err, i;
+
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
+ /* erratum 24313: ignore memory access type */
+ cache = GITS_BASER_nCnB;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ struct its_baser *baser = its->tables + i;
+ u64 val = its_read_baser(its, baser);
+ u64 type = GITS_BASER_TYPE(val);
+ bool indirect = false;
+ u32 order;
+
+ if (type == GITS_BASER_TYPE_NONE)
+ continue;
+
+ if (its_probe_baser_psz(its, baser)) {
+ its_free_tables(its);
+ return -ENXIO;
+ }
+
+ order = get_order(baser->psz);
+
+ switch (type) {
+ case GITS_BASER_TYPE_DEVICE:
+ indirect = its_parse_indirect_baser(its, baser, &order,
+ device_ids(its));
+ break;
+
+ case GITS_BASER_TYPE_VCPU:
+ if (is_v4_1(its)) {
+ struct its_node *sibling;
+
+ WARN_ON(i != 2);
+ if ((sibling = find_sibling_its(its))) {
+ *baser = sibling->tables[2];
+ its_write_baser(its, baser, baser->val);
+ continue;
+ }
+ }
+
+ indirect = its_parse_indirect_baser(its, baser, &order,
+ ITS_MAX_VPEID_BITS);
+ break;
+ }
+
+ err = its_setup_baser(its, baser, cache, shr, order, indirect);
+ if (err < 0) {
+ its_free_tables(its);
+ return err;
+ }
+
+ /* Update settings which will be used for next BASERn */
+ cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
+ shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
+ }
+
+ return 0;
+}
+
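+/*
+ * Look for a v4.1 ITS with a matching CommonLPIAff affinity whose vPE table
+ * (GITS_BASER2) is already valid, and derive a GICR_VPROPBASER value from it
+ * so that this redistributor shares the same vPE L1 table.
+ */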
+static u64 inherit_vpe_l1_table_from_its(void)
+{
+ struct its_node *its;
+ u64 val;
+ u32 aff;
+
+ val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+ aff = compute_common_aff(val);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 baser, addr;
+
+ if (!is_v4_1(its))
+ continue;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+ continue;
+
+ if (aff != compute_its_aff(its))
+ continue;
+
+ /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+ baser = its->tables[2].val;
+ if (!(baser & GITS_BASER_VALID))
+ continue;
+
+ /* We have a winner! */
+ gic_data_rdist()->vpe_l1_base = its->tables[2].base;
+
+ val = GICR_VPROPBASER_4_1_VALID;
+ if (baser & GITS_BASER_INDIRECT)
+ val |= GICR_VPROPBASER_4_1_INDIRECT;
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
+ FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
+ switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
+ case GIC_PAGE_SIZE_64K:
+ addr = GITS_BASER_ADDR_48_to_52(baser);
+ break;
+ default:
+ addr = baser & GENMASK_ULL(47, 12);
+ break;
+ }
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
+ val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+ FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+ FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
+
+ return val;
+ }
+
+ return 0;
+}
+
+static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
+{
+ u32 aff;
+ u64 val;
+ int cpu;
+
+ val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+ aff = compute_common_aff(val);
+
+ for_each_possible_cpu(cpu) {
+ void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
+
+ if (!base || cpu == smp_processor_id())
+ continue;
+
+ val = gic_read_typer(base + GICR_TYPER);
+ if (aff != compute_common_aff(val))
+ continue;
+
+ /*
+ * At this point, we have a victim. This particular CPU
+ * has already booted, and has an affinity that matches
+ * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
+ * Make sure we don't write the Z bit in that case.
+ */
+ val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+ val &= ~GICR_VPROPBASER_4_1_Z;
+
+ gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
+ *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
+
+ return val;
+ }
+
+ return 0;
+}
+
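+/*
+ * Make sure @cpu's redistributor vPE table can address vPE @id, allocating
+ * the required L2 page when the table is indirect.
+ */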
+static bool allocate_vpe_l2_table(int cpu, u32 id)
+{
+ void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
+ unsigned int psz, esz, idx, npg, gpsz;
+ u64 val;
+ struct page *page;
+ __le64 *table;
+
+ if (!gic_rdists->has_rvpeid)
+ return true;
+
+ /* Skip non-present CPUs */
+ if (!base)
+ return true;
+
+ val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+
+ esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
+ gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
+ npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
+
+ switch (gpsz) {
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case GIC_PAGE_SIZE_4K:
+ psz = SZ_4K;
+ break;
+ case GIC_PAGE_SIZE_16K:
+ psz = SZ_16K;
+ break;
+ case GIC_PAGE_SIZE_64K:
+ psz = SZ_64K;
+ break;
+ }
+
+ /* Don't allow vpe_id that exceeds single, flat table limit */
+ if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
+ return (id < (npg * psz / (esz * SZ_8)));
+
+ /* Compute 1st level table index & check if that exceeds table limit */
+ idx = id >> ilog2(psz / (esz * SZ_8));
+ if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
+ return false;
+
+ table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
+
+ /* Allocate memory for 2nd level table */
+ if (!table[idx]) {
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
+ if (!page)
+ return false;
+
+ /* Flush Lvl2 table to PoC if hw doesn't support coherency */
+ if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
+ gic_flush_dcache_to_poc(page_address(page), psz);
+
+ table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+ /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+ if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
+ gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+ /* Ensure updated table contents are visible to RD hardware */
+ dsb(sy);
+ }
+
+ return true;
+}
+
+static int allocate_vpe_l1_table(void)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val, gpsz, npg, pa;
+ unsigned int psz = SZ_64K;
+ unsigned int np, epp, esz;
+ struct page *page;
+
+ if (!gic_rdists->has_rvpeid)
+ return 0;
+
+ /*
+ * if VPENDBASER.Valid is set, disable any previously programmed
+ * VPE by setting PendingLast while clearing Valid. This has the
+ * effect of making sure no doorbell will be generated and we can
+ * then safely clear VPROPBASER.Valid.
+ */
+ if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
+ gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+ vlpi_base + GICR_VPENDBASER);
+
+ /*
+ * If we can inherit the configuration from another RD, let's do
+ * so. Otherwise, we have to go through the allocation process. We
+ * assume that all RDs have the exact same requirements, as
+ * nothing will work otherwise.
+ */
+ val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
+ if (val & GICR_VPROPBASER_4_1_VALID)
+ goto out;
+
+ gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
+ if (!gic_data_rdist()->vpe_table_mask)
+ return -ENOMEM;
+
+ val = inherit_vpe_l1_table_from_its();
+ if (val & GICR_VPROPBASER_4_1_VALID)
+ goto out;
+
+ /* First probe the page size */
+ val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
+ gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+ val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
+ gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
+ esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
+
+ switch (gpsz) {
+ default:
+ gpsz = GIC_PAGE_SIZE_4K;
+ fallthrough;
+ case GIC_PAGE_SIZE_4K:
+ psz = SZ_4K;
+ break;
+ case GIC_PAGE_SIZE_16K:
+ psz = SZ_16K;
+ break;
+ case GIC_PAGE_SIZE_64K:
+ psz = SZ_64K;
+ break;
+ }
+
+ /*
+ * Start populating the register from scratch, including RO fields
+ * (which we want to print in debug cases...)
+ */
+ val = 0;
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
+
+ /* How many entries per GIC page? */
+ esz++;
+ epp = psz / (esz * SZ_8);
+
+ /*
+ * If we need more than just a single L1 page, flag the table
+ * as indirect and compute the number of required L1 pages.
+ */
+ if (epp < ITS_MAX_VPEID) {
+ int nl2;
+
+ val |= GICR_VPROPBASER_4_1_INDIRECT;
+
+ /* Number of L2 pages required to cover the VPEID space */
+ nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
+
+ /* Number of L1 pages to point to the L2 pages */
+ npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
+ } else {
+ npg = 1;
+ }
+
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
+
+ /* Right, that's the number of CPU pages we need for L1 */
+ np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
+
+ pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
+ np, npg, psz, epp, esz);
+ page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
+ if (!page)
+ return -ENOMEM;
+
+ gic_data_rdist()->vpe_l1_base = page_address(page);
+ pa = virt_to_phys(page_address(page));
+ WARN_ON(!IS_ALIGNED(pa, psz));
+
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ val |= GICR_VPROPBASER_4_1_Z;
+ val |= GICR_VPROPBASER_4_1_VALID;
+
+out:
+ gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+ cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
+
+ pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
+ smp_processor_id(), val,
+ cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
+
+ return 0;
+}
+
+static int its_alloc_collections(struct its_node *its)
+{
+ int i;
+
+ its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
+ GFP_KERNEL);
+ if (!its->collections)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_cpu_ids; i++)
+ its->collections[i].target_address = ~0ULL;
+
+ return 0;
+}
+
+static struct page *its_allocate_pending_table(gfp_t gfp_flags)
+{
+ struct page *pend_page;
+
+ pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
+ get_order(LPI_PENDBASE_SZ));
+ if (!pend_page)
+ return NULL;
+
+ /* Make sure the GIC will observe the zero-ed page */
+ gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
+
+ return pend_page;
+}
+
+static void its_free_pending_table(struct page *pt)
+{
+ free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
+}
+
+/*
+ * Booting with kdump and LPIs enabled is generally fine. Any other
+ * case is wrong in the absence of firmware/EFI support.
+ */
+static bool enabled_lpis_allowed(void)
+{
+ phys_addr_t addr;
+ u64 val;
+
+ /* Check whether the property table is in a reserved region */
+ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
+ addr = val & GENMASK_ULL(51, 12);
+
+ return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
+}
+
+static int __init allocate_lpi_tables(void)
+{
+ u64 val;
+ int err, cpu;
+
+ /*
+ * If LPIs are enabled while we run this from the boot CPU,
+ * flag the RD tables as pre-allocated if the stars do align.
+ */
+ val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
+ if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
+ gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
+ RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
+ pr_info("GICv3: Using preallocated redistributor tables\n");
+ }
+
+ err = its_setup_lpi_prop_table();
+ if (err)
+ return err;
+
+ /*
+ * We allocate all the pending tables anyway, as we may have a
+ * mix of RDs that have had LPIs enabled, and some that
+ * don't. We'll free the unused ones as each CPU comes online.
+ */
+ for_each_possible_cpu(cpu) {
+ struct page *pend_page;
+
+ pend_page = its_allocate_pending_table(GFP_NOWAIT);
+ if (!pend_page) {
+ pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
+ return -ENOMEM;
+ }
+
+ gic_data_rdist_cpu(cpu)->pend_page = pend_page;
+ }
+
+ return 0;
+}
+
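+/* Poll GICR_VPENDBASER until the Dirty bit clears, giving up after roughly one second */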
+static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
+{
+ u32 count = 1000000; /* 1s! */
+ bool clean;
+ u64 val;
+
+ do {
+ val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ clean = !(val & GICR_VPENDBASER_Dirty);
+ if (!clean) {
+ count--;
+ cpu_relax();
+ udelay(1);
+ }
+ } while (!clean && count);
+
+ if (unlikely(!clean))
+ pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+
+ return val;
+}
+
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+{
+ u64 val;
+
+ /* Make sure we wait until the RD is done with the initial scan */
+ val = read_vpend_dirty_clear(vlpi_base);
+ val &= ~GICR_VPENDBASER_Valid;
+ val &= ~clr;
+ val |= set;
+ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+ val = read_vpend_dirty_clear(vlpi_base);
+ if (unlikely(val & GICR_VPENDBASER_Dirty))
+ val |= GICR_VPENDBASER_PendingLast;
+
+ return val;
+}
+
+static void its_cpu_init_lpis(void)
+{
+ void __iomem *rbase = gic_data_rdist_rd_base();
+ struct page *pend_page;
+ phys_addr_t paddr;
+ u64 val, tmp;
+
+ if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
+ return;
+
+ val = readl_relaxed(rbase + GICR_CTLR);
+ if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
+ (val & GICR_CTLR_ENABLE_LPIS)) {
+ /*
+ * Check that we get the same property table on all
+ * RDs. If we don't, this is hopeless.
+ */
+ paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
+ paddr &= GENMASK_ULL(51, 12);
+ if (WARN_ON(gic_rdists->prop_table_pa != paddr))
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+ paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
+ paddr &= GENMASK_ULL(51, 16);
+
+ WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
+ gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
+
+ goto out;
+ }
+
+ pend_page = gic_data_rdist()->pend_page;
+ paddr = page_to_phys(pend_page);
+
+ /* set PROPBASE */
+ val = (gic_rdists->prop_table_pa |
+ GICR_PROPBASER_InnerShareable |
+ GICR_PROPBASER_RaWaWb |
+ ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
+
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+ tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
+
+ if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
+ if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
+ /*
+ * The HW reports non-shareable, we must
+ * remove the cacheability attributes as
+ * well.
+ */
+ val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
+ GICR_PROPBASER_CACHEABILITY_MASK);
+ val |= GICR_PROPBASER_nC;
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+ }
+ pr_info_once("GIC: using cache flushing for LPI property table\n");
+ gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
+ }
+
+ /* set PENDBASE */
+ val = (page_to_phys(pend_page) |
+ GICR_PENDBASER_InnerShareable |
+ GICR_PENDBASER_RaWaWb);
+
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+ tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
+
+ if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
+ /*
+ * The HW reports non-shareable, we must remove the
+ * cacheability attributes as well.
+ */
+ val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
+ GICR_PENDBASER_CACHEABILITY_MASK);
+ val |= GICR_PENDBASER_nC;
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+ }
+
+ /* Enable LPIs */
+ val = readl_relaxed(rbase + GICR_CTLR);
+ val |= GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
+ if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+ /*
+ * It's possible for a CPU to receive VLPIs before it is
+ * scheduled as a vPE, especially for the first CPU, and a
+ * VLPI with an INTID larger than 2^(IDbits+1) will be
+ * considered out of range and dropped by the GIC.
+ * So initialize IDbits to a known value to avoid dropping VLPIs.
+ */
+ val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+ pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
+ smp_processor_id(), val);
+ gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+ /*
+ * Also clear the Valid bit of GICR_VPENDBASER, in case some
+ * ancient programming was left behind and could corrupt memory.
+ */
+ val = its_clear_vpend_valid(vlpi_base, 0, 0);
+ }
+
+ if (allocate_vpe_l1_table()) {
+ /*
+ * If the allocation has failed, we're in massive trouble.
+ * Disable direct injection, and pray that no VM was
+ * already running...
+ */
+ gic_rdists->has_rvpeid = false;
+ gic_rdists->has_vlpis = false;
+ }
+
+ /* Make sure the GIC has seen the above */
+ dsb(sy);
+out:
+ gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
+ pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
+ smp_processor_id(),
+ gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
+ "reserved" : "allocated",
+ &paddr);
+}
+
+static void its_cpu_init_collection(struct its_node *its)
+{
+ int cpu = smp_processor_id();
+ u64 target;
+
+ /* Avoid cross-node collections and their mapping */
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ struct device_node *cpu_node;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (its->numa_node != NUMA_NO_NODE &&
+ its->numa_node != of_node_to_nid(cpu_node))
+ return;
+ }
+
+ /*
+ * We now have to bind each collection to its target
+ * redistributor.
+ */
+ if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+ /*
+ * This ITS wants the physical address of the
+ * redistributor.
+ */
+ target = gic_data_rdist()->phys_base;
+ } else {
+ /* This ITS wants a linear CPU number. */
+ target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+ target = GICR_TYPER_CPU_NUMBER(target) << 16;
+ }
+
+ /* Perform collection mapping */
+ its->collections[cpu].target_address = target;
+ its->collections[cpu].col_id = cpu;
+
+ its_send_mapc(its, &its->collections[cpu], 1);
+ its_send_invall(its, &its->collections[cpu]);
+}
+
+static void its_cpu_init_collections(void)
+{
+ struct its_node *its;
+
+ raw_spin_lock(&its_lock);
+
+ list_for_each_entry(its, &its_nodes, entry)
+ its_cpu_init_collection(its);
+
+ raw_spin_unlock(&its_lock);
+}
+
+static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
+{
+ struct its_device *its_dev = NULL, *tmp;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&its->lock, flags);
+
+ list_for_each_entry(tmp, &its->its_device_list, entry) {
+ if (tmp->device_id == dev_id) {
+ its_dev = tmp;
+ break;
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&its->lock, flags);
+
+ return its_dev;
+}
+
+static struct its_baser *its_get_baser(struct its_node *its, u32 type)
+{
+ int i;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (GITS_BASER_TYPE(its->tables[i].val) == type)
+ return &its->tables[i];
+ }
+
+ return NULL;
+}
+
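+ /*
+ * For a flat table, simply check that the ID fits. For an indirect
+ * (two-level) table, allocate the level-2 page on demand, hook it into
+ * the level-1 entry, and flush to PoC when the ITS is not coherent.
+ */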
+static bool its_alloc_table_entry(struct its_node *its,
+ struct its_baser *baser, u32 id)
+{
+ struct page *page;
+ u32 esz, idx;
+ __le64 *table;
+
+ /* Don't allow device id that exceeds single, flat table limit */
+ esz = GITS_BASER_ENTRY_SIZE(baser->val);
+ if (!(baser->val & GITS_BASER_INDIRECT))
+ return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
+
+ /* Compute 1st level table index & check if that exceeds table limit */
+ idx = id >> ilog2(baser->psz / esz);
+ if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
+ return false;
+
+ table = baser->base;
+
+ /* Allocate memory for 2nd level table */
+ if (!table[idx]) {
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(baser->psz));
+ if (!page)
+ return false;
+
+ /* Flush Lvl2 table to PoC if hw doesn't support coherency */
+ if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+ gic_flush_dcache_to_poc(page_address(page), baser->psz);
+
+ table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+ /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+ if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+ gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+ /* Ensure updated table contents are visible to ITS hardware */
+ dsb(sy);
+ }
+
+ return true;
+}
+
+static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+{
+ struct its_baser *baser;
+
+ baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+ /* Don't allow device id that exceeds ITS hardware limit */
+ if (!baser)
+ return (ilog2(dev_id) < device_ids(its));
+
+ return its_alloc_table_entry(its, baser, dev_id);
+}
+
+static bool its_alloc_vpe_table(u32 vpe_id)
+{
+ struct its_node *its;
+ int cpu;
+
+ /*
+ * Make sure the L2 tables are allocated on *all* v4 ITSs. We
+ * could try and only do it on ITSs corresponding to devices
+ * that have interrupts targeted at this VPE, but the
+ * complexity becomes crazy (and you have tons of memory
+ * anyway, right?).
+ */
+ list_for_each_entry(its, &its_nodes, entry) {
+ struct its_baser *baser;
+
+ if (!is_v4(its))
+ continue;
+
+ baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
+ if (!baser)
+ return false;
+
+ if (!its_alloc_table_entry(its, baser, vpe_id))
+ return false;
+ }
+
+ /* Not v4.1? No need to iterate over the RDs; bail out early. */
+ if (!gic_rdists->has_rvpeid)
+ return true;
+
+ /*
+ * Make sure the L2 tables are allocated for all copies of
+ * the L1 table on *all* v4.1 RDs.
+ */
+ for_each_possible_cpu(cpu) {
+ if (!allocate_vpe_l2_table(cpu, vpe_id))
+ return false;
+ }
+
+ return true;
+}
+
+static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
+ int nvecs, bool alloc_lpis)
+{
+ struct its_device *dev;
+ unsigned long *lpi_map = NULL;
+ unsigned long flags;
+ u16 *col_map = NULL;
+ void *itt;
+ int lpi_base;
+ int nr_lpis;
+ int nr_ites;
+ int sz;
+
+ if (!its_alloc_device_table(its, dev_id))
+ return NULL;
+
+ if (WARN_ON(!is_power_of_2(nvecs)))
+ nvecs = roundup_pow_of_two(nvecs);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ /*
+ * Even if the device wants a single LPI, the ITT must be
+ * sized as a power of two (and you need at least one bit...).
+ */
+ nr_ites = max(2, nvecs);
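+ /*
+ * GITS_TYPER.ITT_entry_size is encoded as (size - 1); size the ITT
+ * accordingly and pad the allocation so it can be aligned to
+ * ITS_ITT_ALIGN.
+ */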
+ sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
+ sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
+ itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+ if (alloc_lpis) {
+ lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
+ if (lpi_map)
+ col_map = kcalloc(nr_lpis, sizeof(*col_map),
+ GFP_KERNEL);
+ } else {
+ col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
+ nr_lpis = 0;
+ lpi_base = 0;
+ }
+
+ if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
+ kfree(dev);
+ kfree(itt);
+ bitmap_free(lpi_map);
+ kfree(col_map);
+ return NULL;
+ }
+
+ gic_flush_dcache_to_poc(itt, sz);
+
+ dev->its = its;
+ dev->itt = itt;
+ dev->nr_ites = nr_ites;
+ dev->event_map.lpi_map = lpi_map;
+ dev->event_map.col_map = col_map;
+ dev->event_map.lpi_base = lpi_base;
+ dev->event_map.nr_lpis = nr_lpis;
+ raw_spin_lock_init(&dev->event_map.vlpi_lock);
+ dev->device_id = dev_id;
+ INIT_LIST_HEAD(&dev->entry);
+
+ raw_spin_lock_irqsave(&its->lock, flags);
+ list_add(&dev->entry, &its->its_device_list);
+ raw_spin_unlock_irqrestore(&its->lock, flags);
+
+ /* Map device to its ITT */
+ its_send_mapd(dev, 1);
+
+ return dev;
+}
+
+static void its_free_device(struct its_device *its_dev)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&its_dev->its->lock, flags);
+ list_del(&its_dev->entry);
+ raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
+ kfree(its_dev->event_map.col_map);
+ kfree(its_dev->itt);
+ kfree(its_dev);
+}
+
+static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
+{
+ int idx;
+
+ /* Find a free LPI region in lpi_map and allocate it. */
+ idx = bitmap_find_free_region(dev->event_map.lpi_map,
+ dev->event_map.nr_lpis,
+ get_count_order(nvecs));
+ if (idx < 0)
+ return -ENOSPC;
+
+ *hwirq = dev->event_map.lpi_base + idx;
+
+ return 0;
+}
+
+static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct its_node *its;
+ struct its_device *its_dev;
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
+ int err = 0;
+
+ /*
+ * We ignore "dev" entirely, and rely on the dev_id that has
+ * been passed via the scratchpad. This limits this domain's
+ * usefulness to upper layers that definitely know that they
+ * are built on top of the ITS.
+ */
+ dev_id = info->scratchpad[0].ul;
+
+ msi_info = msi_get_domain_info(domain);
+ its = msi_info->data;
+
+ if (!gic_rdists->has_direct_lpi &&
+ vpe_proxy.dev &&
+ vpe_proxy.dev->its == its &&
+ dev_id == vpe_proxy.dev->device_id) {
+ /* Bad luck. Get yourself a better implementation */
+ WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
+ dev_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&its->dev_alloc_lock);
+ its_dev = its_find_device(its, dev_id);
+ if (its_dev) {
+ /*
+ * We have already seen this ID, probably through
+ * another alias (PCI bridge of some sort). No need to
+ * create the device.
+ */
+ its_dev->shared = true;
+ pr_debug("Reusing ITT for devID %x\n", dev_id);
+ goto out;
+ }
+
+ its_dev = its_create_device(its, dev_id, nvec, true);
+ if (!its_dev) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
+ its_dev->shared = true;
+
+ pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
+out:
+ mutex_unlock(&its->dev_alloc_lock);
+ info->scratchpad[0].ptr = its_dev;
+ return err;
+}
+
+static struct msi_domain_ops its_msi_domain_ops = {
+ .msi_prepare = its_msi_prepare,
+};
+
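+ /*
+ * Build a parent-domain fwspec for the LPI: three cells
+ * (GIC_IRQ_TYPE_LPI, hwirq, trigger) when the parent is a DT node,
+ * two cells when it is a raw fwnode (ACPI), then allocate the
+ * interrupt in the parent domain.
+ */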
+static int its_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_fwspec fwspec;
+
+ if (irq_domain_get_of_node(domain->parent)) {
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = GIC_IRQ_TYPE_LPI;
+ fwspec.param[1] = hwirq;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+ } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ } else {
+ return -EINVAL;
+ }
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+}
+
+static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ msi_alloc_info_t *info = args;
+ struct its_device *its_dev = info->scratchpad[0].ptr;
+ struct its_node *its = its_dev->its;
+ struct irq_data *irqd;
+ irq_hw_number_t hwirq;
+ int err;
+ int i;
+
+ err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
+ if (err)
+ return err;
+
+ err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
+ if (err)
+ return err;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ return err;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i,
+ hwirq + i, &its_irq_chip, its_dev);
+ irqd = irq_get_irq_data(virq + i);
+ irqd_set_single_target(irqd);
+ irqd_set_affinity_on_activate(irqd);
+ pr_debug("ID:%d pID:%d vID:%d\n",
+ (int)(hwirq + i - its_dev->event_map.lpi_base),
+ (int)(hwirq + i), virq + i);
+ }
+
+ return 0;
+}
+
+static int its_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool reserve)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ int cpu;
+
+ cpu = its_select_cpu(d, cpu_online_mask);
+ if (cpu < 0 || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ its_inc_lpi_count(d, cpu);
+ its_dev->event_map.col_map[event] = cpu;
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ /* Map the GIC IRQ and event to the device */
+ its_send_mapti(its_dev, d->hwirq, event);
+ return 0;
+}
+
+static void its_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
+ /* Stop the delivery of interrupts */
+ its_send_discard(its_dev, event);
+}
+
+static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_node *its = its_dev->its;
+ int i;
+
+ bitmap_release_region(its_dev->event_map.lpi_map,
+ its_get_event_id(irq_domain_get_irq_data(domain, virq)),
+ get_count_order(nr_irqs));
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *data = irq_domain_get_irq_data(domain,
+ virq + i);
+ /* Nuke the entry in the domain */
+ irq_domain_reset_irq_data(data);
+ }
+
+ mutex_lock(&its->dev_alloc_lock);
+
+ /*
+ * If all interrupts have been freed, start mopping the
+ * floor. This is conditioned on the device not being shared.
+ */
+ if (!its_dev->shared &&
+ bitmap_empty(its_dev->event_map.lpi_map,
+ its_dev->event_map.nr_lpis)) {
+ its_lpi_free(its_dev->event_map.lpi_map,
+ its_dev->event_map.lpi_base,
+ its_dev->event_map.nr_lpis);
+
+ /* Unmap device/itt */
+ its_send_mapd(its_dev, 0);
+ its_free_device(its_dev);
+ }
+
+ mutex_unlock(&its->dev_alloc_lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops its_domain_ops = {
+ .alloc = its_irq_domain_alloc,
+ .free = its_irq_domain_free,
+ .activate = its_irq_domain_activate,
+ .deactivate = its_irq_domain_deactivate,
+};
+
+/*
+ * This is insane.
+ *
+ * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
+ * likely), the only way to perform an invalidate is to use a fake
+ * device to issue an INV command, implying that the LPI has first
+ * been mapped to some event on that device. Since this is not exactly
+ * cheap, we try to keep that mapping around as long as possible, and
+ * only issue an UNMAP if we're short on available slots.
+ *
+ * Broken by design(tm).
+ *
+ * GICv4.1, on the other hand, mandates that we're able to invalidate
+ * by writing to a MMIO register. It doesn't implement the whole of
+ * DirectLPI, but that's good enough. And most of the time, we don't
+ * even have to invalidate anything, as the redistributor can be told
+ * whether to generate a doorbell or not (we thus leave it enabled,
+ * always).
+ */
+static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
+{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
+ /* Already unmapped? */
+ if (vpe->vpe_proxy_event == -1)
+ return;
+
+ its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
+ vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
+
+ /*
+ * We don't track empty slots at all, so let's move the
+ * next_victim pointer if we can quickly reuse that slot
+ * instead of nuking an existing entry. Not clear that this is
+ * always a win though, and this might just generate a ripple
+ * effect... Let's just hope VPEs don't migrate too often.
+ */
+ if (vpe_proxy.vpes[vpe_proxy.next_victim])
+ vpe_proxy.next_victim = vpe->vpe_proxy_event;
+
+ vpe->vpe_proxy_event = -1;
+}
+
+static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
+{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
+ if (!gic_rdists->has_direct_lpi) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+ its_vpe_db_proxy_unmap_locked(vpe);
+ raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+ }
+}
+
+static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
+{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
+ /* Already mapped? */
+ if (vpe->vpe_proxy_event != -1)
+ return;
+
+ /* This slot was already allocated. Kick the other VPE out. */
+ if (vpe_proxy.vpes[vpe_proxy.next_victim])
+ its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
+
+ /* Map the new VPE instead */
+ vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
+ vpe->vpe_proxy_event = vpe_proxy.next_victim;
+ vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
+
+ vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
+ its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
+}
+
+static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
+{
+ unsigned long flags;
+ struct its_collection *target_col;
+
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
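+ /*
+ * With DirectLPI, clearing the doorbell on the RD we are moving away
+ * from is enough; no proxy mapping is required.
+ */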
+ if (gic_rdists->has_direct_lpi) {
+ void __iomem *rdbase;
+
+ rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
+ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
+ wait_for_syncr(rdbase);
+
+ return;
+ }
+
+ raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+
+ its_vpe_db_proxy_map_locked(vpe);
+
+ target_col = &vpe_proxy.dev->its->collections[to];
+ its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
+ vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
+
+ raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+}
+
+static int its_vpe_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ int from, cpu = cpumask_first(mask_val);
+ unsigned long flags;
+
+ /*
+ * Changing affinity is mega expensive, so let's be as lazy as
+ * we can and only do it if we really have to. Also, if mapped
+ * into the proxy device, we need to move the doorbell
+ * interrupt to its new location.
+ *
+ * Another thing is that changing the affinity of a vPE affects
+ * *other interrupts* such as all the vLPIs that are routed to
+ * this vPE. This means that the irq_desc lock is not enough to
+ * protect us, and that we must ensure nobody samples vpe->col_idx
+ * during the update, hence the lock below which must also be
+ * taken on any vLPI handling path that evaluates vpe->col_idx.
+ */
+ from = vpe_to_cpuid_lock(vpe, &flags);
+ if (from == cpu)
+ goto out;
+
+ vpe->col_idx = cpu;
+
+ /*
+ * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
+ * is sharing its VPE table with the current one.
+ */
+ if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
+ cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
+ goto out;
+
+ its_send_vmovp(vpe);
+ its_vpe_db_proxy_move(vpe, from, cpu);
+
+out:
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ vpe_to_cpuid_unlock(vpe, flags);
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
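+ /*
+ * On implementations that expose GICR_VPENDBASER.Dirty, poll (for up
+ * to 500us) until the redistributor has finished parsing the VPT.
+ */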
+static void its_wait_vpt_parse_complete(void)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val;
+
+ if (!gic_rdists->has_vpend_valid_dirty)
+ return;
+
+ WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
+ val,
+ !(val & GICR_VPENDBASER_Dirty),
+ 1, 500));
+}
+
+static void its_vpe_schedule(struct its_vpe *vpe)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val;
+
+ /* Schedule the VPE */
+ val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
+ GENMASK_ULL(51, 12);
+ val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+ val = virt_to_phys(page_address(vpe->vpt_page)) &
+ GENMASK_ULL(51, 16);
+ val |= GICR_VPENDBASER_RaWaWb;
+ val |= GICR_VPENDBASER_InnerShareable;
+ /*
+ * There is no good way of finding out if the pending table is
+ * empty as we can race against the doorbell interrupt very
+ * easily. So in the end, vpe->pending_last is only an
+ * indication that the vcpu has something pending, not one
+ * that the pending table is empty. A good implementation
+ * would be able to read its coarse map pretty quickly anyway,
+ * making this a tolerable issue.
+ */
+ val |= GICR_VPENDBASER_PendingLast;
+ val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
+ val |= GICR_VPENDBASER_Valid;
+ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+}
+
+static void its_vpe_deschedule(struct its_vpe *vpe)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val;
+
+ val = its_clear_vpend_valid(vlpi_base, 0, 0);
+
+ vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
+ vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+}
+
+static void its_vpe_invall(struct its_vpe *vpe)
+{
+ struct its_node *its;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!is_v4(its))
+ continue;
+
+ if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
+ continue;
+
+ /*
+ * Sending a VINVALL to a single ITS is enough, as all
+ * we need is to reach the redistributors.
+ */
+ its_send_vinvall(its, vpe);
+ return;
+ }
+}
+
+static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ switch (info->cmd_type) {
+ case SCHEDULE_VPE:
+ its_vpe_schedule(vpe);
+ return 0;
+
+ case DESCHEDULE_VPE:
+ its_vpe_deschedule(vpe);
+ return 0;
+
+ case COMMIT_VPE:
+ its_wait_vpt_parse_complete();
+ return 0;
+
+ case INVALL_VPE:
+ its_vpe_invall(vpe);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
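+ /*
+ * Issue an ITS command (INT, CLEAR or INV) on the vPE doorbell by
+ * mapping the vPE onto the GICv4.0 proxy device if it isn't already.
+ */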
+static void its_vpe_send_cmd(struct its_vpe *vpe,
+ void (*cmd)(struct its_device *, u32))
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+
+ its_vpe_db_proxy_map_locked(vpe);
+ cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
+
+ raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+}
+
+static void its_vpe_send_inv(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ if (gic_rdists->has_direct_lpi)
+ __direct_lpi_inv(d, d->parent_data->hwirq);
+ else
+ its_vpe_send_cmd(vpe, its_send_inv);
+}
+
+static void its_vpe_mask_irq(struct irq_data *d)
+{
+ /*
+ * We need to mask the LPI, which is described by the parent
+ * irq_data. Instead of calling into the parent (which won't
+ * exactly do the right thing), let's simply use the
+ * parent_data pointer. Yes, I'm naughty.
+ */
+ lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+ its_vpe_send_inv(d);
+}
+
+static void its_vpe_unmask_irq(struct irq_data *d)
+{
+ /* Same hack as above... */
+ lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
+ its_vpe_send_inv(d);
+}
+
+static int its_vpe_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ if (gic_rdists->has_direct_lpi) {
+ void __iomem *rdbase;
+
+ rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+ if (state) {
+ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
+ } else {
+ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
+ wait_for_syncr(rdbase);
+ }
+ } else {
+ if (state)
+ its_vpe_send_cmd(vpe, its_send_int);
+ else
+ its_vpe_send_cmd(vpe, its_send_clear);
+ }
+
+ return 0;
+}
+
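+ /* irq_retrigger expects a non-zero return on success, hence the negation */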
+static int its_vpe_retrigger(struct irq_data *d)
+{
+ return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
+}
+
+static struct irq_chip its_vpe_irq_chip = {
+ .name = "GICv4-vpe",
+ .irq_mask = its_vpe_mask_irq,
+ .irq_unmask = its_vpe_unmask_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = its_vpe_set_affinity,
+ .irq_retrigger = its_vpe_retrigger,
+ .irq_set_irqchip_state = its_vpe_set_irqchip_state,
+ .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
+};
+
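+ /*
+ * Find (and cache) the first GICv4.1 ITS in the system. Any v4.1 ITS
+ * will do for INVDB and VSGI commands.
+ */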
+static struct its_node *find_4_1_its(void)
+{
+ static struct its_node *its = NULL;
+
+ if (!its) {
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (is_v4_1(its))
+ return its;
+ }
+
+ /* Oops? */
+ its = NULL;
+ }
+
+ return its;
+}
+
+static void its_vpe_4_1_send_inv(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * GICv4.1 wants doorbells to be invalidated using the
+ * INVDB command in order to be broadcast to all RDs. Send
+ * it to the first valid ITS, and let the HW do its magic.
+ */
+ its = find_4_1_its();
+ if (its)
+ its_send_invdb(its, vpe);
+}
+
+static void its_vpe_4_1_mask_irq(struct irq_data *d)
+{
+ lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+ its_vpe_4_1_send_inv(d);
+}
+
+static void its_vpe_4_1_unmask_irq(struct irq_data *d)
+{
+ lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
+ its_vpe_4_1_send_inv(d);
+}
+
+static void its_vpe_4_1_schedule(struct its_vpe *vpe,
+ struct its_cmd_info *info)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val = 0;
+
+ /* Schedule the VPE */
+ val |= GICR_VPENDBASER_Valid;
+ val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
+ val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
+ val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
+
+ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+}
+
+static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
+ struct its_cmd_info *info)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val;
+
+ if (info->req_db) {
+ unsigned long flags;
+
+ /*
+ * vPE is going to block: make the vPE non-resident with
+ * PendingLast clear and DB set. The GIC guarantees that if
+ * we read-back PendingLast clear, then a doorbell will be
+ * delivered when an interrupt comes.
+ *
+ * Note the locking to deal with the doorbell interrupt
+ * handler, which may update pending_last concurrently.
+ */
+ raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
+ val = its_clear_vpend_valid(vlpi_base,
+ GICR_VPENDBASER_PendingLast,
+ GICR_VPENDBASER_4_1_DB);
+ vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+ raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
+ } else {
+ /*
+ * We're not blocking, so just make the vPE non-resident
+ * with PendingLast set, indicating that we'll be back.
+ */
+ val = its_clear_vpend_valid(vlpi_base,
+ 0,
+ GICR_VPENDBASER_PendingLast);
+ vpe->pending_last = true;
+ }
+}
+
+static void its_vpe_4_1_invall(struct its_vpe *vpe)
+{
+ void __iomem *rdbase;
+ unsigned long flags;
+ u64 val;
+ int cpu;
+
+ val = GICR_INVALLR_V;
+ val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
+
+ /* Target the redistributor this vPE is currently known on */
+ cpu = vpe_to_cpuid_lock(vpe, &flags);
+ raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVALLR);
+
+ wait_for_syncr(rdbase);
+ raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ vpe_to_cpuid_unlock(vpe, flags);
+}
+
+static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ switch (info->cmd_type) {
+ case SCHEDULE_VPE:
+ its_vpe_4_1_schedule(vpe, info);
+ return 0;
+
+ case DESCHEDULE_VPE:
+ its_vpe_4_1_deschedule(vpe, info);
+ return 0;
+
+ case COMMIT_VPE:
+ its_wait_vpt_parse_complete();
+ return 0;
+
+ case INVALL_VPE:
+ its_vpe_4_1_invall(vpe);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip its_vpe_4_1_irq_chip = {
+ .name = "GICv4.1-vpe",
+ .irq_mask = its_vpe_4_1_mask_irq,
+ .irq_unmask = its_vpe_4_1_unmask_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = its_vpe_set_affinity,
+ .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
+};
+
+static void its_configure_sgi(struct irq_data *d, bool clear)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_desc desc;
+
+ desc.its_vsgi_cmd.vpe = vpe;
+ desc.its_vsgi_cmd.sgi = d->hwirq;
+ desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
+ desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
+ desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
+ desc.its_vsgi_cmd.clear = clear;
+
+ /*
+ * GICv4.1 allows us to send VSGI commands to any ITS as long as the
+ * destination VPE is mapped there. Since we map them eagerly at
+ * activation time, we're pretty sure the first GICv4.1 ITS will do.
+ */
+ its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
+}
+
+static void its_sgi_mask_irq(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ vpe->sgi_config[d->hwirq].enabled = false;
+ its_configure_sgi(d, false);
+}
+
+static void its_sgi_unmask_irq(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ vpe->sgi_config[d->hwirq].enabled = true;
+ its_configure_sgi(d, false);
+}
+
+static int its_sgi_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ /*
+ * There is no notion of affinity for virtual SGIs, at least
+ * not on the host (since they can only be targeting a vPE).
+ * Tell the kernel we've done whatever it asked for.
+ */
+ irq_data_update_effective_affinity(d, mask_val);
+ return IRQ_SET_MASK_OK;
+}
+
+static int its_sgi_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ if (state) {
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its = find_4_1_its();
+ u64 val;
+
+ val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
+ val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
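+ /*
+ * sgir_base maps the 64K frame at ITS base + 128K, while GITS_SGIR
+ * is defined relative to the ITS base; adjust the offset accordingly.
+ */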
+ writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
+ } else {
+ its_configure_sgi(d, true);
+ }
+
+ return 0;
+}
+
+static int its_sgi_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool *val)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ void __iomem *base;
+ unsigned long flags;
+ u32 count = 1000000; /* 1s! */
+ u32 status;
+ int cpu;
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ /*
+ * Locking galore! We can race against two different events:
+ *
+ * - Concurrent vPE affinity change: we must make sure it cannot
+ * happen, or we'll talk to the wrong redistributor. This is
+ * identical to what happens with vLPIs.
+ *
+ * - Concurrent VSGIPENDR access: As it involves accessing two
+ * MMIO registers, this must be made atomic one way or another.
+ */
+ cpu = vpe_to_cpuid_lock(vpe, &flags);
+ raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
+ writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
+ do {
+ status = readl_relaxed(base + GICR_VSGIPENDR);
+ if (!(status & GICR_VSGIPENDR_BUSY))
+ goto out;
+
+ count--;
+ if (!count) {
+ pr_err_ratelimited("Unable to get SGI status\n");
+ goto out;
+ }
+ cpu_relax();
+ udelay(1);
+ } while (count);
+
+out:
+ raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ vpe_to_cpuid_unlock(vpe, flags);
+
+ if (!count)
+ return -ENXIO;
+
+ *val = !!(status & (1 << d->hwirq));
+
+ return 0;
+}
+
+static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ switch (info->cmd_type) {
+ case PROP_UPDATE_VSGI:
+ vpe->sgi_config[d->hwirq].priority = info->priority;
+ vpe->sgi_config[d->hwirq].group = info->group;
+ its_configure_sgi(d, false);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip its_sgi_irq_chip = {
+ .name = "GICv4.1-sgi",
+ .irq_mask = its_sgi_mask_irq,
+ .irq_unmask = its_sgi_unmask_irq,
+ .irq_set_affinity = its_sgi_set_affinity,
+ .irq_set_irqchip_state = its_sgi_set_irqchip_state,
+ .irq_get_irqchip_state = its_sgi_get_irqchip_state,
+ .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
+};
+
+static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct its_vpe *vpe = args;
+ int i;
+
+ /* Yes, we do want 16 SGIs */
+ WARN_ON(nr_irqs != 16);
+
+ for (i = 0; i < 16; i++) {
+ vpe->sgi_config[i].priority = 0;
+ vpe->sgi_config[i].enabled = false;
+ vpe->sgi_config[i].group = false;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+ &its_sgi_irq_chip, vpe);
+ irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
+ }
+
+ return 0;
+}
+
+static void its_sgi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ /* Nothing to do */
+}
+
+static int its_sgi_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool reserve)
+{
+ /* Write out the initial SGI configuration */
+ its_configure_sgi(d, false);
+ return 0;
+}
+
+static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ /*
+ * The VSGI command is awkward:
+ *
+ * - To change the configuration, CLEAR must be set to false,
+ * leaving the pending bit unchanged.
+ * - To clear the pending bit, CLEAR must be set to true, leaving
+ * the configuration unchanged.
+ *
+ * You just can't do both at once, hence the two commands below.
+ */
+ vpe->sgi_config[d->hwirq].enabled = false;
+ its_configure_sgi(d, false);
+ its_configure_sgi(d, true);
+}
+
+static const struct irq_domain_ops its_sgi_domain_ops = {
+ .alloc = its_sgi_irq_domain_alloc,
+ .free = its_sgi_irq_domain_free,
+ .activate = its_sgi_irq_domain_activate,
+ .deactivate = its_sgi_irq_domain_deactivate,
+};
+
+static int its_vpe_id_alloc(void)
+{
+ return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
+}
+
+static void its_vpe_id_free(u16 id)
+{
+ ida_simple_remove(&its_vpeid_ida, id);
+}
+
+static int its_vpe_init(struct its_vpe *vpe)
+{
+ struct page *vpt_page;
+ int vpe_id;
+
+ /* Allocate vpe_id */
+ vpe_id = its_vpe_id_alloc();
+ if (vpe_id < 0)
+ return vpe_id;
+
+ /* Allocate VPT */
+ vpt_page = its_allocate_pending_table(GFP_KERNEL);
+ if (!vpt_page) {
+ its_vpe_id_free(vpe_id);
+ return -ENOMEM;
+ }
+
+ if (!its_alloc_vpe_table(vpe_id)) {
+ its_vpe_id_free(vpe_id);
+ its_free_pending_table(vpt_page);
+ return -ENOMEM;
+ }
+
+ raw_spin_lock_init(&vpe->vpe_lock);
+ vpe->vpe_id = vpe_id;
+ vpe->vpt_page = vpt_page;
+ if (gic_rdists->has_rvpeid)
+ atomic_set(&vpe->vmapp_count, 0);
+ else
+ vpe->vpe_proxy_event = -1;
+
+ return 0;
+}
+
+static void its_vpe_teardown(struct its_vpe *vpe)
+{
+ its_vpe_db_proxy_unmap(vpe);
+ its_vpe_id_free(vpe->vpe_id);
+ its_free_pending_table(vpe->vpt_page);
+}
+
+static void its_vpe_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct its_vm *vm = domain->host_data;
+ int i;
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *data = irq_domain_get_irq_data(domain,
+ virq + i);
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
+
+ BUG_ON(vm != vpe->its_vm);
+
+ clear_bit(data->hwirq, vm->db_bitmap);
+ its_vpe_teardown(vpe);
+ irq_domain_reset_irq_data(data);
+ }
+
+ if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
+ its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+ its_free_prop_table(vm->vprop_page);
+ }
+}
+
+static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct irq_chip *irqchip = &its_vpe_irq_chip;
+ struct its_vm *vm = args;
+ unsigned long *bitmap;
+ struct page *vprop_page;
+ int base, nr_ids, i, err = 0;
+
+ BUG_ON(!vm);
+
+ bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
+ if (!bitmap)
+ return -ENOMEM;
+
+ if (nr_ids < nr_irqs) {
+ its_lpi_free(bitmap, base, nr_ids);
+ return -ENOMEM;
+ }
+
+ vprop_page = its_allocate_prop_table(GFP_KERNEL);
+ if (!vprop_page) {
+ its_lpi_free(bitmap, base, nr_ids);
+ return -ENOMEM;
+ }
+
+ vm->db_bitmap = bitmap;
+ vm->db_lpi_base = base;
+ vm->nr_db_lpis = nr_ids;
+ vm->vprop_page = vprop_page;
+
+ if (gic_rdists->has_rvpeid)
+ irqchip = &its_vpe_4_1_irq_chip;
+
+ for (i = 0; i < nr_irqs; i++) {
+ vm->vpes[i]->vpe_db_lpi = base + i;
+ err = its_vpe_init(vm->vpes[i]);
+ if (err)
+ break;
+ err = its_irq_gic_domain_alloc(domain, virq + i,
+ vm->vpes[i]->vpe_db_lpi);
+ if (err)
+ break;
+ irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+ irqchip, vm->vpes[i]);
+ set_bit(i, bitmap);
+ }
+
+ if (err) {
+ if (i > 0)
+ its_vpe_irq_domain_free(domain, virq, i);
+
+ its_lpi_free(bitmap, base, nr_ids);
+ its_free_prop_table(vprop_page);
+ }
+
+ return err;
+}
+
+static int its_vpe_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool reserve)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * If we use the list map, we issue VMAPP on demand... Unless
+ * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
+ * so that VSGIs can work.
+ */
+ if (!gic_requires_eager_mapping())
+ return 0;
+
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!is_v4(its))
+ continue;
+
+ its_send_vmapp(its, vpe, true);
+ its_send_vinvall(its, vpe);
+ }
+
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+
+ return 0;
+}
+
+static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * If we use the list map on GICv4.0, we unmap the VPE once no
+ * VLPIs are associated with the VM.
+ */
+ if (!gic_requires_eager_mapping())
+ return;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!is_v4(its))
+ continue;
+
+ its_send_vmapp(its, vpe, false);
+ }
+
+ /*
+ * There may be a direct read of the VPT after unmapping the
+ * vPE; to guarantee the validity of that read, make the VPT
+ * memory coherent with the CPU caches here.
+ */
+ if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
+ gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
+ LPI_PENDBASE_SZ);
+}
+
+static const struct irq_domain_ops its_vpe_domain_ops = {
+ .alloc = its_vpe_irq_domain_alloc,
+ .free = its_vpe_irq_domain_free,
+ .activate = its_vpe_irq_domain_activate,
+ .deactivate = its_vpe_irq_domain_deactivate,
+};
+
+static int its_force_quiescent(void __iomem *base)
+{
+ u32 count = 1000000; /* 1s */
+ u32 val;
+
+ val = readl_relaxed(base + GITS_CTLR);
+ /*
+ * The GIC architecture specification requires the ITS to be both
+ * disabled and quiescent for writes to GITS_BASER<n> or
+ * GITS_CBASER to not have UNPREDICTABLE results.
+ */
+ if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
+ return 0;
+
+ /* Disable the generation of all interrupts to this ITS */
+ val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
+ writel_relaxed(val, base + GITS_CTLR);
+
+ /* Poll GITS_CTLR and wait until ITS becomes quiescent */
+ while (1) {
+ val = readl_relaxed(base + GITS_CTLR);
+ if (val & GITS_CTLR_QUIESCENT)
+ return 0;
+
+ count--;
+ if (!count)
+ return -EBUSY;
+
+ cpu_relax();
+ udelay(1);
+ }
+}
+
+static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
+{
+ struct its_node *its = data;
+
+ /* erratum 22375: only alloc 8MB table size (20 bits) */
+ its->typer &= ~GITS_TYPER_DEVBITS;
+ its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
+
+ return true;
+}
+
+static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+ struct its_node *its = data;
+
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+
+ return true;
+}
+
+static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+{
+ struct its_node *its = data;
+
+ /* On QDF2400, the size of the ITE is 16Bytes */
+ its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
+ its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
+
+ return true;
+}
+
+static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
+{
+ struct its_node *its = its_dev->its;
+
+ /*
+ * The Socionext Synquacer SoC has a so-called 'pre-ITS',
+ * which maps 32-bit writes targeted at a separate window of
+ * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
+ * with device ID taken from bits [device_id_bits + 1:2] of
+ * the window offset.
+ */
+ return its->pre_its_base + (its_dev->device_id << 2);
+}
+
+static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
+{
+ struct its_node *its = data;
+ u32 pre_its_window[2];
+ u32 ids;
+
+ if (!fwnode_property_read_u32_array(its->fwnode_handle,
+ "socionext,synquacer-pre-its",
+ pre_its_window,
+ ARRAY_SIZE(pre_its_window))) {
+
+ its->pre_its_base = pre_its_window[0];
+ its->get_msi_base = its_irq_get_msi_base_pre_its;
+
+ ids = ilog2(pre_its_window[1]) - 2;
+ if (device_ids(its) > ids) {
+ its->typer &= ~GITS_TYPER_DEVBITS;
+ its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
+ }
+
+ /* the pre-ITS breaks isolation, so disable MSI remapping */
+ its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
+ return true;
+ }
+ return false;
+}
+
+static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
+{
+ struct its_node *its = data;
+
+ /*
+ * Hip07 insists on using the wrong address for the VLPI
+ * page. Trick it into doing the right thing...
+ */
+ its->vlpi_redist_offset = SZ_128K;
+ return true;
+}
+
+static const struct gic_quirk its_quirks[] = {
+#ifdef CONFIG_CAVIUM_ERRATUM_22375
+ {
+ .desc = "ITS: Cavium errata 22375, 24313",
+ .iidr = 0xa100034c, /* ThunderX pass 1.x */
+ .mask = 0xffff0fff,
+ .init = its_enable_quirk_cavium_22375,
+ },
+#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+ {
+ .desc = "ITS: Cavium erratum 23144",
+ .iidr = 0xa100034c, /* ThunderX pass 1.x */
+ .mask = 0xffff0fff,
+ .init = its_enable_quirk_cavium_23144,
+ },
+#endif
+#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
+ {
+ .desc = "ITS: QDF2400 erratum 0065",
+ .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_qdf2400_e0065,
+ },
+#endif
+#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
+ {
+ /*
+ * The Socionext Synquacer SoC incorporates ARM's own GIC-500
+ * implementation, but with a 'pre-ITS' added that requires
+ * special handling in software.
+ */
+ .desc = "ITS: Socionext Synquacer pre-ITS",
+ .iidr = 0x0001143b,
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_socionext_synquacer,
+ },
+#endif
+#ifdef CONFIG_HISILICON_ERRATUM_161600802
+ {
+ .desc = "ITS: Hip07 erratum 161600802",
+ .iidr = 0x00000004,
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_hip07_161600802,
+ },
+#endif
+ {
+ }
+};
+
+static void its_enable_quirks(struct its_node *its)
+{
+ u32 iidr = readl_relaxed(its->base + GITS_IIDR);
+
+ gic_enable_quirks(iidr, its_quirks, its);
+}
+
+static int its_save_disable(void)
+{
+ struct its_node *its;
+ int err = 0;
+
+ raw_spin_lock(&its_lock);
+ list_for_each_entry(its, &its_nodes, entry) {
+ void __iomem *base;
+
+ base = its->base;
+ its->ctlr_save = readl_relaxed(base + GITS_CTLR);
+ err = its_force_quiescent(base);
+ if (err) {
+ pr_err("ITS@%pa: failed to quiesce: %d\n",
+ &its->phys_base, err);
+ writel_relaxed(its->ctlr_save, base + GITS_CTLR);
+ goto err;
+ }
+
+ its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
+ }
+
+err:
+ if (err) {
+ list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
+ void __iomem *base;
+
+ base = its->base;
+ writel_relaxed(its->ctlr_save, base + GITS_CTLR);
+ }
+ }
+ raw_spin_unlock(&its_lock);
+
+ return err;
+}
+
+static void its_restore_enable(void)
+{
+ struct its_node *its;
+ int ret;
+
+ raw_spin_lock(&its_lock);
+ list_for_each_entry(its, &its_nodes, entry) {
+ void __iomem *base;
+ int i;
+
+ base = its->base;
+
+ /*
+ * Make sure that the ITS is disabled. If it fails to quiesce,
+ * don't restore it since writing to CBASER or BASER<n>
+ * registers is undefined according to the GIC v3 ITS
+ * Specification.
+ *
+ * Firmware resuming with the ITS enabled is terminally broken.
+ */
+ WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
+ ret = its_force_quiescent(base);
+ if (ret) {
+ pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
+ &its->phys_base, ret);
+ continue;
+ }
+
+ gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
+
+ /*
+ * Writing CBASER resets CREADR to 0, so make CWRITER and
+ * cmd_write line up with it.
+ */
+ its->cmd_write = its->cmd_base;
+ gits_write_cwriter(0, base + GITS_CWRITER);
+
+ /* Restore GITS_BASER from the value cache. */
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ struct its_baser *baser = &its->tables[i];
+
+ if (!(baser->val & GITS_BASER_VALID))
+ continue;
+
+ its_write_baser(its, baser, baser->val);
+ }
+ writel_relaxed(its->ctlr_save, base + GITS_CTLR);
+
+ /*
+ * Reinit the collection if it is stored in the ITS, which is
+ * indicated by col_id being less than the HCC field
+ * (CID < HCC, as specified in the GICv3 documentation).
+ */
+ if (its->collections[smp_processor_id()].col_id <
+ GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
+ its_cpu_init_collection(its);
+ }
+ raw_spin_unlock(&its_lock);
+}
+
+static struct syscore_ops its_syscore_ops = {
+ .suspend = its_save_disable,
+ .resume = its_restore_enable,
+};
+
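+ /*
+ * Map a single ITS register frame, check GITS_PIDR2 for a GICv3/v4
+ * implementation and force the ITS quiescent. Returns NULL and sets
+ * *err on failure.
+ */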
+static void __init __iomem *its_map_one(struct resource *res, int *err)
+{
+ void __iomem *its_base;
+ u32 val;
+
+ its_base = ioremap(res->start, SZ_64K);
+ if (!its_base) {
+ pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ if (val != 0x30 && val != 0x40) {
+ pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
+ *err = -ENODEV;
+ goto out_unmap;
+ }
+
+ *err = its_force_quiescent(its_base);
+ if (*err) {
+ pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
+ goto out_unmap;
+ }
+
+ return its_base;
+
+out_unmap:
+ iounmap(its_base);
+ return NULL;
+}
+
+static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
+{
+ struct irq_domain *inner_domain;
+ struct msi_domain_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
+ if (!inner_domain) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ inner_domain->parent = its_parent;
+ irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
+ inner_domain->flags |= its->msi_domain_flags;
+ info->ops = &its_msi_domain_ops;
+ info->data = its;
+ inner_domain->host_data = info;
+
+ return 0;
+}
+
+static int its_init_vpe_domain(void)
+{
+ struct its_node *its;
+ u32 devid;
+ int entries;
+
+ if (gic_rdists->has_direct_lpi) {
+ pr_info("ITS: Using DirectLPI for VPE invalidation\n");
+ return 0;
+ }
+
+ /* Any ITS will do, even if not v4 */
+ its = list_first_entry(&its_nodes, struct its_node, entry);
+
+ entries = roundup_pow_of_two(nr_cpu_ids);
+ vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
+ GFP_KERNEL);
+ if (!vpe_proxy.vpes)
+ return -ENOMEM;
+
+ /* Use the last possible DevID */
+ devid = GENMASK(device_ids(its) - 1, 0);
+ vpe_proxy.dev = its_create_device(its, devid, entries, false);
+ if (!vpe_proxy.dev) {
+ kfree(vpe_proxy.vpes);
+ pr_err("ITS: Can't allocate GICv4 proxy device\n");
+ return -ENOMEM;
+ }
+
+ BUG_ON(entries > vpe_proxy.dev->nr_ites);
+
+ raw_spin_lock_init(&vpe_proxy.lock);
+ vpe_proxy.next_victim = 0;
+ pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
+ devid, vpe_proxy.dev->nr_ites);
+
+ return 0;
+}
+
+static int __init its_compute_its_list_map(struct resource *res,
+ void __iomem *its_base)
+{
+ int its_number;
+ u32 ctlr;
+
+ /*
+ * This is assumed to be done early enough that we're
+ * guaranteed to be single-threaded, hence no
+ * locking. Should this change, we should address
+ * this.
+ */
+ its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
+ if (its_number >= GICv4_ITS_LIST_MAX) {
+ pr_err("ITS@%pa: No ITSList entry available!\n",
+ &res->start);
+ return -EINVAL;
+ }
+
+ ctlr = readl_relaxed(its_base + GITS_CTLR);
+ ctlr &= ~GITS_CTLR_ITS_NUMBER;
+ ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
+ writel_relaxed(ctlr, its_base + GITS_CTLR);
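+ /*
+ * The ITS may implement a fixed ITSList number; if our write did not
+ * stick, adopt the value the hardware reports instead.
+ */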
+ ctlr = readl_relaxed(its_base + GITS_CTLR);
+ if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
+ its_number = ctlr & GITS_CTLR_ITS_NUMBER;
+ its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
+ }
+
+ if (test_and_set_bit(its_number, &its_list_map)) {
+ pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
+ &res->start, its_number);
+ return -EINVAL;
+ }
+
+ return its_number;
+}
+
+static int __init its_probe_one(struct resource *res,
+ struct fwnode_handle *handle, int numa_node)
+{
+ struct its_node *its;
+ void __iomem *its_base;
+ u64 baser, tmp, typer;
+ struct page *page;
+ u32 ctlr;
+ int err;
+
+ its_base = its_map_one(res, &err);
+ if (!its_base)
+ return err;
+
+ pr_info("ITS %pR\n", res);
+
+ its = kzalloc(sizeof(*its), GFP_KERNEL);
+ if (!its) {
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ raw_spin_lock_init(&its->lock);
+ mutex_init(&its->dev_alloc_lock);
+ INIT_LIST_HEAD(&its->entry);
+ INIT_LIST_HEAD(&its->its_device_list);
+ typer = gic_read_typer(its_base + GITS_TYPER);
+ its->typer = typer;
+ its->base = its_base;
+ its->phys_base = res->start;
+ if (is_v4(its)) {
+ if (!(typer & GITS_TYPER_VMOVP)) {
+ err = its_compute_its_list_map(res, its_base);
+ if (err < 0)
+ goto out_free_its;
+
+ its->list_nr = err;
+
+ pr_info("ITS@%pa: Using ITS number %d\n",
+ &res->start, err);
+ } else {
+ pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
+ }
+
+ if (is_v4_1(its)) {
+ u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
+
+ its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
+ if (!its->sgir_base) {
+ err = -ENOMEM;
+ goto out_free_its;
+ }
+
+ its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
+
+ pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
+ &res->start, its->mpidr, svpet);
+ }
+ }
+
+ its->numa_node = numa_node;
+
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
+ if (!page) {
+ err = -ENOMEM;
+ goto out_unmap_sgir;
+ }
+ its->cmd_base = (void *)page_address(page);
+ its->cmd_write = its->cmd_base;
+ its->fwnode_handle = handle;
+ its->get_msi_base = its_irq_get_msi_base;
+ its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
+
+ its_enable_quirks(its);
+
+ err = its_alloc_tables(its);
+ if (err)
+ goto out_free_cmd;
+
+ err = its_alloc_collections(its);
+ if (err)
+ goto out_free_tables;
+
+ baser = (virt_to_phys(its->cmd_base) |
+ GITS_CBASER_RaWaWb |
+ GITS_CBASER_InnerShareable |
+ (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
+ GITS_CBASER_VALID);
+
+ gits_write_cbaser(baser, its->base + GITS_CBASER);
+ tmp = gits_read_cbaser(its->base + GITS_CBASER);
+
+ if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
+ if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
+ /*
+ * The HW reports non-shareable, so we must
+ * remove the cacheability attributes as
+ * well.
+ */
+ baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
+ GITS_CBASER_CACHEABILITY_MASK);
+ baser |= GITS_CBASER_nC;
+ gits_write_cbaser(baser, its->base + GITS_CBASER);
+ }
+ pr_info("ITS: using cache flushing for cmd queue\n");
+ its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
+ }
+
+ gits_write_cwriter(0, its->base + GITS_CWRITER);
+ ctlr = readl_relaxed(its->base + GITS_CTLR);
+ ctlr |= GITS_CTLR_ENABLE;
+ if (is_v4(its))
+ ctlr |= GITS_CTLR_ImDe;
+ writel_relaxed(ctlr, its->base + GITS_CTLR);
+
+ err = its_init_domain(handle, its);
+ if (err)
+ goto out_free_tables;
+
+ raw_spin_lock(&its_lock);
+ list_add(&its->entry, &its_nodes);
+ raw_spin_unlock(&its_lock);
+
+ return 0;
+
+out_free_tables:
+ its_free_tables(its);
+out_free_cmd:
+ free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+out_unmap_sgir:
+ if (its->sgir_base)
+ iounmap(its->sgir_base);
+out_free_its:
+ kfree(its);
+out_unmap:
+ iounmap(its_base);
+ pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
+ return err;
+}
+
+static bool gic_rdists_supports_plpis(void)
+{
+ return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
+}
+
+static int redist_disable_lpis(void)
+{
+ void __iomem *rbase = gic_data_rdist_rd_base();
+ u64 timeout = USEC_PER_SEC;
+ u64 val;
+
+ if (!gic_rdists_supports_plpis()) {
+ pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
+ return -ENXIO;
+ }
+
+ val = readl_relaxed(rbase + GICR_CTLR);
+ if (!(val & GICR_CTLR_ENABLE_LPIS))
+ return 0;
+
+ /*
+ * If coming via a CPU hotplug event, we don't need to disable
+ * LPIs before trying to re-enable them. They are already
+ * configured and all is well in the world.
+ *
+ * If running with preallocated tables, there is nothing to do.
+ */
+ if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
+ (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
+ return 0;
+
+ /*
+ * From that point on, we only try to do some damage control.
+ */
+ pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
+ smp_processor_id());
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+ /* Disable LPIs */
+ val &= ~GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
+ /* Make sure any change to GICR_CTLR is observable by the GIC */
+ dsb(sy);
+
+ /*
+ * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
+ * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
+ * Error out if we time out waiting for RWP to clear.
+ */
+ while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
+ if (!timeout) {
+ pr_err("CPU%d: Timeout while disabling LPIs\n",
+ smp_processor_id());
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ timeout--;
+ }
+
+ /*
+ * After it has been written to 1, it is IMPLEMENTATION
+ * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
+ * cleared to 0. Error out if clearing the bit failed.
+ */
+ if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
+ pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int its_cpu_init(void)
+{
+ if (!list_empty(&its_nodes)) {
+ int ret;
+
+ ret = redist_disable_lpis();
+ if (ret)
+ return ret;
+
+ its_cpu_init_lpis();
+ its_cpu_init_collections();
+ }
+
+ return 0;
+}
+
+static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
+{
+ cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
+ gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
+}
+
+static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
+ rdist_memreserve_cpuhp_cleanup_workfn);
+
+static int its_cpu_memreserve_lpi(unsigned int cpu)
+{
+ struct page *pend_page;
+ int ret = 0;
+
+ /* This gets to run exactly once per CPU */
+ if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
+ return 0;
+
+ pend_page = gic_data_rdist()->pend_page;
+ if (WARN_ON(!pend_page)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /*
+ * If the pending table was pre-programmed, free the memory we
+ * preemptively allocated. Otherwise, reserve that memory for
+ * later kexecs.
+ */
+ if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
+ its_free_pending_table(pend_page);
+ gic_data_rdist()->pend_page = NULL;
+ } else {
+ phys_addr_t paddr = page_to_phys(pend_page);
+ WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
+ }
+
+out:
+ /* Last CPU being brought up gets to issue the cleanup */
+ if (!IS_ENABLED(CONFIG_SMP) ||
+ cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
+ schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
+
+ gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
+ return ret;
+}
+
+/* Mark all the BASER registers as invalid before they get reprogrammed */
+static int __init its_reset_one(struct resource *res)
+{
+ void __iomem *its_base;
+ int err, i;
+
+ its_base = its_map_one(res, &err);
+ if (!its_base)
+ return err;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++)
+ gits_write_baser(0, its_base + GITS_BASER + (i << 3));
+
+ iounmap(its_base);
+ return 0;
+}
+
+static const struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+static int __init its_of_probe(struct device_node *node)
+{
+ struct device_node *np;
+ struct resource res;
+
+ /*
+ * Make sure *all* the ITS are reset before we probe any, as
+ * they may be sharing memory. If any of the ITS fails to
+ * reset, don't even try to go any further, as this could
+ * result in something even worse.
+ */
+ for (np = of_find_matching_node(node, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ int err;
+
+ if (!of_device_is_available(np) ||
+ !of_property_read_bool(np, "msi-controller") ||
+ of_address_to_resource(np, 0, &res))
+ continue;
+
+ err = its_reset_one(&res);
+ if (err)
+ return err;
+ }
+
+ for (np = of_find_matching_node(node, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ if (!of_device_is_available(np))
+ continue;
+ if (!of_property_read_bool(np, "msi-controller")) {
+ pr_warn("%pOF: no msi-controller property, ITS ignored\n",
+ np);
+ continue;
+ }
+
+ if (of_address_to_resource(np, 0, &res)) {
+ pr_warn("%pOF: no regs?\n", np);
+ continue;
+ }
+
+ its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
+ }
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+
+#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
+
+#ifdef CONFIG_ACPI_NUMA
+struct its_srat_map {
+ /* numa node id */
+ u32 numa_node;
+ /* GIC ITS ID */
+ u32 its_id;
+};
+
+static struct its_srat_map *its_srat_maps __initdata;
+static int its_in_srat __initdata;
+
+static int __init acpi_get_its_numa_node(u32 its_id)
+{
+ int i;
+
+ for (i = 0; i < its_in_srat; i++) {
+ if (its_id == its_srat_maps[i].its_id)
+ return its_srat_maps[i].numa_node;
+ }
+ return NUMA_NO_NODE;
+}
+
+static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ return 0;
+}
+
+static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ int node;
+ struct acpi_srat_gic_its_affinity *its_affinity;
+
+ its_affinity = (struct acpi_srat_gic_its_affinity *)header;
+ if (!its_affinity)
+ return -EINVAL;
+
+ if (its_affinity->header.length < sizeof(*its_affinity)) {
+ pr_err("SRAT: Invalid header length %d in ITS affinity\n",
+ its_affinity->header.length);
+ return -EINVAL;
+ }
+
+ /*
+ * Note that in theory a new proximity node could be created by this
+ * entry as it is an SRAT resource allocation structure.
+ * We do not currently support doing so.
+ */
+ node = pxm_to_node(its_affinity->proximity_domain);
+
+ if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+ pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
+ return 0;
+ }
+
+ its_srat_maps[its_in_srat].numa_node = node;
+ its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
+ its_in_srat++;
+ pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
+ its_affinity->proximity_domain, its_affinity->its_id, node);
+
+ return 0;
+}
+
+static void __init acpi_table_parse_srat_its(void)
+{
+ int count;
+
+ count = acpi_table_parse_entries(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
+ gic_acpi_match_srat_its, 0);
+ if (count <= 0)
+ return;
+
+ its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
+ GFP_KERNEL);
+ if (!its_srat_maps)
+ return;
+
+ acpi_table_parse_entries(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
+ gic_acpi_parse_srat_its, 0);
+}
+
+/* free the its_srat_maps after ITS probing */
+static void __init acpi_its_srat_maps_free(void)
+{
+ kfree(its_srat_maps);
+}
+#else
+static void __init acpi_table_parse_srat_its(void) { }
+static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
+static void __init acpi_its_srat_maps_free(void) { }
+#endif
+
+static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_translator *its_entry;
+ struct fwnode_handle *dom_handle;
+ struct resource res;
+ int err;
+
+ its_entry = (struct acpi_madt_generic_translator *)header;
+ memset(&res, 0, sizeof(res));
+ res.start = its_entry->base_address;
+ res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
+ res.flags = IORESOURCE_MEM;
+
+ dom_handle = irq_domain_alloc_fwnode(&res.start);
+ if (!dom_handle) {
+ pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
+ &res.start);
+ return -ENOMEM;
+ }
+
+ err = iort_register_domain_token(its_entry->translation_id, res.start,
+ dom_handle);
+ if (err) {
+ pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
+ &res.start, its_entry->translation_id);
+ goto dom_err;
+ }
+
+ err = its_probe_one(&res, dom_handle,
+ acpi_get_its_numa_node(its_entry->translation_id));
+ if (!err)
+ return 0;
+
+ iort_deregister_domain_token(its_entry->translation_id);
+dom_err:
+ irq_domain_free_fwnode(dom_handle);
+ return err;
+}
+
+static int __init its_acpi_reset(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_translator *its_entry;
+ struct resource res;
+
+ its_entry = (struct acpi_madt_generic_translator *)header;
+ res = (struct resource) {
+ .start = its_entry->base_address,
+ .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ };
+
+ return its_reset_one(&res);
+}
+
+static void __init its_acpi_probe(void)
+{
+ acpi_table_parse_srat_its();
+ /*
+ * Make sure *all* the ITS are reset before we probe any, as
+ * they may be sharing memory. If any of the ITS fails to
+ * reset, don't even try to go any further, as this could
+ * result in something even worse.
+ */
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ its_acpi_reset, 0) > 0)
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ gic_acpi_parse_madt_its, 0);
+ acpi_its_srat_maps_free();
+}
+#else
+static void __init its_acpi_probe(void) { }
+#endif
+
+int __init its_lpi_memreserve_init(void)
+{
+ int state;
+
+ if (!efi_enabled(EFI_CONFIG_TABLES))
+ return 0;
+
+ if (list_empty(&its_nodes))
+ return 0;
+
+ gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
+ state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "irqchip/arm/gicv3/memreserve:online",
+ its_cpu_memreserve_lpi,
+ NULL);
+ if (state < 0)
+ return state;
+
+ gic_rdists->cpuhp_memreserve_state = state;
+
+ return 0;
+}
+
+int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
+ struct irq_domain *parent_domain)
+{
+ struct device_node *of_node;
+ struct its_node *its;
+ bool has_v4 = false;
+ bool has_v4_1 = false;
+ int err;
+
+ gic_rdists = rdists;
+
+ its_parent = parent_domain;
+ of_node = to_of_node(handle);
+ if (of_node)
+ its_of_probe(of_node);
+ else
+ its_acpi_probe();
+
+ if (list_empty(&its_nodes)) {
+ pr_warn("ITS: No ITS available, not enabling LPIs\n");
+ return -ENXIO;
+ }
+
+ err = allocate_lpi_tables();
+ if (err)
+ return err;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ has_v4 |= is_v4(its);
+ has_v4_1 |= is_v4_1(its);
+ }
+
+ /* Don't bother with inconsistent systems */
+ if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
+ rdists->has_rvpeid = false;
+
+ if (has_v4 & rdists->has_vlpis) {
+ const struct irq_domain_ops *sgi_ops;
+
+ if (has_v4_1)
+ sgi_ops = &its_sgi_domain_ops;
+ else
+ sgi_ops = NULL;
+
+ if (its_init_vpe_domain() ||
+ its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
+ rdists->has_vlpis = false;
+ pr_err("ITS: Disabling GICv4 support\n");
+ }
+ }
+
+ register_syscore_ops(&its_syscore_ops);
+
+ return 0;
+}
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
new file mode 100644
index 000000000..e1efdec9e
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#define pr_fmt(fmt) "GICv3: " fmt
+
+#include <linux/iommu.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+struct mbi_range {
+ u32 spi_start;
+ u32 nr_spis;
+ unsigned long *bm;
+};
+
+static DEFINE_MUTEX(mbi_lock);
+static phys_addr_t mbi_phys_base;
+static struct mbi_range *mbi_ranges;
+static unsigned int mbi_range_nr;
+
+static struct irq_chip mbi_irq_chip = {
+ .name = "MBI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int mbi_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int err;
+
+ /*
+ * Using ACPI? There is no MBI support in the spec, you
+ * shouldn't even be here.
+ */
+ if (!is_of_node(domain->parent->fwnode))
+ return -EINVAL;
+
+ /*
+ * Let's default to edge. This is consistent with traditional
+ * MSIs, and systems requiring level signaling will just
+ * enforce the trigger on their own.
+ */
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = hwirq - 32;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ return err;
+
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+}
+
+static void mbi_free_msi(struct mbi_range *mbi, unsigned int hwirq,
+ int nr_irqs)
+{
+ mutex_lock(&mbi_lock);
+ bitmap_release_region(mbi->bm, hwirq - mbi->spi_start,
+ get_count_order(nr_irqs));
+ mutex_unlock(&mbi_lock);
+}
+
+static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ msi_alloc_info_t *info = args;
+ struct mbi_range *mbi = NULL;
+ int hwirq, offset, i, err = 0;
+
+ mutex_lock(&mbi_lock);
+ for (i = 0; i < mbi_range_nr; i++) {
+ offset = bitmap_find_free_region(mbi_ranges[i].bm,
+ mbi_ranges[i].nr_spis,
+ get_count_order(nr_irqs));
+ if (offset >= 0) {
+ mbi = &mbi_ranges[i];
+ break;
+ }
+ }
+ mutex_unlock(&mbi_lock);
+
+ if (!mbi)
+ return -ENOSPC;
+
+ hwirq = mbi->spi_start + offset;
+
+ err = iommu_dma_prepare_msi(info->desc,
+ mbi_phys_base + GICD_SETSPI_NSR);
+ if (err)
+ return err;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = mbi_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto fail;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &mbi_irq_chip, mbi);
+ }
+
+ return 0;
+
+fail:
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ mbi_free_msi(mbi, hwirq, nr_irqs);
+ return err;
+}
+
+static void mbi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mbi_range *mbi = irq_data_get_irq_chip_data(d);
+
+ mbi_free_msi(mbi, d->hwirq, nr_irqs);
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops mbi_domain_ops = {
+ .alloc = mbi_irq_domain_alloc,
+ .free = mbi_irq_domain_free,
+};
+
+static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ msg[0].address_hi = upper_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
+ msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
+ msg[0].data = data->parent_data->hwirq;
+
+ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+}
+
+#ifdef CONFIG_PCI_MSI
+/* PCI-specific irqchip */
+static void mbi_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void mbi_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip mbi_msi_irq_chip = {
+ .name = "MSI",
+ .irq_mask = mbi_mask_msi_irq,
+ .irq_unmask = mbi_unmask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_compose_msi_msg = mbi_compose_msi_msg,
+};
+
+static struct msi_domain_info mbi_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &mbi_msi_irq_chip,
+};
+
+static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
+ struct irq_domain **pci_domain)
+{
+ *pci_domain = pci_msi_create_irq_domain(nexus_domain->parent->fwnode,
+ &mbi_msi_domain_info,
+ nexus_domain);
+ if (!*pci_domain)
+ return -ENOMEM;
+
+ return 0;
+}
+#else
+static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
+ struct irq_domain **pci_domain)
+{
+ *pci_domain = NULL;
+ return 0;
+}
+#endif
+
+static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ mbi_compose_msi_msg(data, msg);
+
+ msg[1].address_hi = upper_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
+ msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
+ msg[1].data = data->parent_data->hwirq;
+
+ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]);
+}
+
+/* Platform-MSI specific irqchip */
+static struct irq_chip mbi_pmsi_irq_chip = {
+ .name = "pMSI",
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_compose_msi_msg = mbi_compose_mbi_msg,
+ .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
+};
+
+static struct msi_domain_ops mbi_pmsi_ops = {
+};
+
+static struct msi_domain_info mbi_pmsi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_LEVEL_CAPABLE),
+ .ops = &mbi_pmsi_ops,
+ .chip = &mbi_pmsi_irq_chip,
+};
+
+static int mbi_allocate_domains(struct irq_domain *parent)
+{
+ struct irq_domain *nexus_domain, *pci_domain, *plat_domain;
+ int err;
+
+ nexus_domain = irq_domain_create_tree(parent->fwnode,
+ &mbi_domain_ops, NULL);
+ if (!nexus_domain)
+ return -ENOMEM;
+
+ irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
+ nexus_domain->parent = parent;
+
+ err = mbi_allocate_pci_domain(nexus_domain, &pci_domain);
+
+ plat_domain = platform_msi_create_irq_domain(parent->fwnode,
+ &mbi_pmsi_domain_info,
+ nexus_domain);
+
+ if (err || !plat_domain) {
+ if (plat_domain)
+ irq_domain_remove(plat_domain);
+ if (pci_domain)
+ irq_domain_remove(pci_domain);
+ irq_domain_remove(nexus_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
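+/*
+ * The "mbi-ranges" DT property parsed below is a list of (first SPI, number
+ * of SPIs) pairs. A hypothetical node could carry, for instance:
+ *
+ *   msi-controller;
+ *   mbi-ranges = <64 16>, <320 8>;
+ *
+ * which would register two MBI ranges of 16 and 8 SPIs respectively.
+ */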
+int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
+{
+ struct device_node *np;
+ const __be32 *reg;
+ int ret, n;
+
+ np = to_of_node(fwnode);
+
+ if (!of_property_read_bool(np, "msi-controller"))
+ return 0;
+
+ n = of_property_count_elems_of_size(np, "mbi-ranges", sizeof(u32));
+ if (n <= 0 || n % 2)
+ return -EINVAL;
+
+ mbi_range_nr = n / 2;
+ mbi_ranges = kcalloc(mbi_range_nr, sizeof(*mbi_ranges), GFP_KERNEL);
+ if (!mbi_ranges)
+ return -ENOMEM;
+
+ for (n = 0; n < mbi_range_nr; n++) {
+ ret = of_property_read_u32_index(np, "mbi-ranges", n * 2,
+ &mbi_ranges[n].spi_start);
+ if (ret)
+ goto err_free_mbi;
+ ret = of_property_read_u32_index(np, "mbi-ranges", n * 2 + 1,
+ &mbi_ranges[n].nr_spis);
+ if (ret)
+ goto err_free_mbi;
+
+ mbi_ranges[n].bm = bitmap_zalloc(mbi_ranges[n].nr_spis, GFP_KERNEL);
+ if (!mbi_ranges[n].bm) {
+ ret = -ENOMEM;
+ goto err_free_mbi;
+ }
+ pr_info("MBI range [%d:%d]\n", mbi_ranges[n].spi_start,
+ mbi_ranges[n].spi_start + mbi_ranges[n].nr_spis - 1);
+ }
+
+ reg = of_get_property(np, "mbi-alias", NULL);
+ if (reg) {
+ mbi_phys_base = of_translate_address(np, reg);
+ if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
+ ret = -ENXIO;
+ goto err_free_mbi;
+ }
+ } else {
+ struct resource res;
+
+ if (of_address_to_resource(np, 0, &res)) {
+ ret = -ENXIO;
+ goto err_free_mbi;
+ }
+
+ mbi_phys_base = res.start;
+ }
+
+ pr_info("Using MBI frame %pa\n", &mbi_phys_base);
+
+ ret = mbi_allocate_domains(parent);
+ if (ret)
+ goto err_free_mbi;
+
+ return 0;
+
+err_free_mbi:
+ if (mbi_ranges) {
+ for (n = 0; n < mbi_range_nr; n++)
+ bitmap_free(mbi_ranges[n].bm);
+ kfree(mbi_ranges);
+ }
+
+ return ret;
+}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
new file mode 100644
index 000000000..11f7c53e4
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -0,0 +1,2577 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#define pr_fmt(fmt) "GICv3: " fmt
+
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-common.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/irq-partition-percpu.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/arm-smccc.h>
+
+#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/virt.h>
+
+#include "irq-gic-common.h"
+
+#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
+
+#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
+#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
+#define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2)
+
+#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
+
+struct redist_region {
+ void __iomem *redist_base;
+ phys_addr_t phys_base;
+ bool single_redist;
+};
+
+struct gic_chip_data {
+ struct fwnode_handle *fwnode;
+ phys_addr_t dist_phys_base;
+ void __iomem *dist_base;
+ struct redist_region *redist_regions;
+ struct rdists rdists;
+ struct irq_domain *domain;
+ u64 redist_stride;
+ u32 nr_redist_regions;
+ u64 flags;
+ bool has_rss;
+ unsigned int ppi_nr;
+ struct partition_desc **ppi_descs;
+};
+
+#define T241_CHIPS_MAX 4
+static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);
+
+static struct gic_chip_data gic_data __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
+#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+
+/*
+ * The behaviours of RPR and PMR registers differ depending on the value of
+ * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
+ * distributor and redistributors depends on whether security is enabled in the
+ * GIC.
+ *
+ * When security is enabled, non-secure priority values from the (re)distributor
+ * are presented to the GIC CPUIF as follow:
+ * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
+ *
+ * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
+ * EL1 are subject to a similar operation thus matching the priorities presented
+ * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
+ * these values are unchanged by the GIC.
+ *
+ * see GICv3/GICv4 Architecture Specification (IHI0069D):
+ * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
+ * priorities.
+ * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
+ * interrupt.
+ */
+static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
+
+/*
+ * Global static key controlling whether an update to PMR allowing more
+ * interrupts needs to be propagated to the redistributor (DSB SY).
+ * And this needs to be exported for modules to be able to enable
+ * interrupts...
+ */
+DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
+EXPORT_SYMBOL(gic_pmr_sync);
+
+DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
+EXPORT_SYMBOL(gic_nonsecure_priorities);
+
+/*
+ * When the Non-secure world has access to group 0 interrupts (as a
+ * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
+ * return the Distributor's view of the interrupt priority.
+ *
+ * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
+ * written by software is moved to the Non-secure range by the Distributor.
+ *
+ * If both are true (which is when gic_nonsecure_priorities gets enabled),
+ * we need to shift down the priority programmed by software to match it
+ * against the value returned by ICC_RPR_EL1.
+ */
+#define GICD_INT_RPR_PRI(priority) \
+ ({ \
+ u32 __priority = (priority); \
+ if (static_branch_unlikely(&gic_nonsecure_priorities)) \
+ __priority = 0x80 | (__priority >> 1); \
+ \
+ __priority; \
+ })
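+
+/*
+ * Worked example (illustrative value only): with gic_nonsecure_priorities
+ * enabled, GICD_INT_RPR_PRI(0xa0) evaluates to 0x80 | (0xa0 >> 1) = 0xd0,
+ * i.e. the priority as presented in the Distributor's non-secure view
+ * described above.
+ */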
+
+/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
+static refcount_t *ppi_nmi_refs;
+
+static struct gic_kvm_info gic_v3_kvm_info __initdata;
+static DEFINE_PER_CPU(bool, has_rss);
+
+#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
+#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
+
+/* Our default, arbitrary priority value. Linux only uses one anyway. */
+#define DEFAULT_PMR_VALUE 0xf0
+
+enum gic_intid_range {
+ SGI_RANGE,
+ PPI_RANGE,
+ SPI_RANGE,
+ EPPI_RANGE,
+ ESPI_RANGE,
+ LPI_RANGE,
+ __INVALID_RANGE__
+};
+
+static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
+{
+ switch (hwirq) {
+ case 0 ... 15:
+ return SGI_RANGE;
+ case 16 ... 31:
+ return PPI_RANGE;
+ case 32 ... 1019:
+ return SPI_RANGE;
+ case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
+ return EPPI_RANGE;
+ case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
+ return ESPI_RANGE;
+ case 8192 ... GENMASK(23, 0):
+ return LPI_RANGE;
+ default:
+ return __INVALID_RANGE__;
+ }
+}
+
+static enum gic_intid_range get_intid_range(struct irq_data *d)
+{
+ return __get_intid_range(d->hwirq);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
+static inline bool gic_irq_in_rdist(struct irq_data *d)
+{
+ switch (get_intid_range(d)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
+{
+ if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 chip;
+
+ /*
+ * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
+ * registers are directed to the chip that owns the SPI. The
+ * the alias region can also be used for writes to the
+ * GICD_In{E} except GICD_ICENABLERn. Each chip has support
+ * for 320 {E}SPIs. Mappings for all 4 chips:
+ * Chip0 = 32-351
+ * Chip1 = 352-671
+ * Chip2 = 672-991
+ * Chip3 = 4096-4415
+ */
+ switch (__get_intid_range(hwirq)) {
+ case SPI_RANGE:
+ chip = (hwirq - 32) / 320;
+ break;
+ case ESPI_RANGE:
+ chip = 3;
+ break;
+ default:
+ unreachable();
+ }
+ return t241_dist_base_alias[chip];
+ }
+
+ return gic_data.dist_base;
+}
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+ switch (get_intid_range(d)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ /* SGI+PPI -> SGI_base for this CPU */
+ return gic_data_rdist_sgi_base();
+
+ case SPI_RANGE:
+ case ESPI_RANGE:
+ /* SPI -> dist_base */
+ return gic_data.dist_base;
+
+ default:
+ return NULL;
+ }
+}
+
+static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
+{
+ u32 count = 1000000; /* 1s! */
+
+ while (readl_relaxed(base + GICD_CTLR) & bit) {
+ count--;
+ if (!count) {
+ pr_err_ratelimited("RWP timeout, gone fishing\n");
+ return;
+ }
+ cpu_relax();
+ udelay(1);
+ }
+}
+
+/* Wait for completion of a distributor change */
+static void gic_dist_wait_for_rwp(void)
+{
+ gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
+}
+
+/* Wait for completion of a redistributor change */
+static void gic_redist_wait_for_rwp(void)
+{
+ gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
+}
+
+#ifdef CONFIG_ARM64
+
+static u64 __maybe_unused gic_read_iar(void)
+{
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
+ return gic_read_iar_cavium_thunderx();
+ else
+ return gic_read_iar_common();
+}
+#endif
+
+static void gic_enable_redist(bool enable)
+{
+ void __iomem *rbase;
+ u32 count = 1000000; /* 1s! */
+ u32 val;
+
+ if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
+ return;
+
+ rbase = gic_data_rdist_rd_base();
+
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (enable)
+ /* Wake up this CPU redistributor */
+ val &= ~GICR_WAKER_ProcessorSleep;
+ else
+ val |= GICR_WAKER_ProcessorSleep;
+ writel_relaxed(val, rbase + GICR_WAKER);
+
+ if (!enable) { /* Check that GICR_WAKER is writeable */
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (!(val & GICR_WAKER_ProcessorSleep))
+ return; /* No PM support in this redistributor */
+ }
+
+ while (--count) {
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
+ break;
+ cpu_relax();
+ udelay(1);
+ }
+ if (!count)
+ pr_err_ratelimited("redistributor failed to %s...\n",
+ enable ? "wakeup" : "sleep");
+}
+
+/*
+ * Routines to disable, enable, EOI and route interrupts
+ */
+static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
+{
+ switch (get_intid_range(d)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case SPI_RANGE:
+ *index = d->hwirq;
+ return offset;
+ case EPPI_RANGE:
+ /*
+ * Contrary to the ESPI range, the EPPI range is contiguous
+ * to the PPI range in the registers, so let's adjust the
+ * displacement accordingly. Consistency is overrated.
+ */
+ *index = d->hwirq - EPPI_BASE_INTID + 32;
+ return offset;
+ case ESPI_RANGE:
+ *index = d->hwirq - ESPI_BASE_INTID;
+ switch (offset) {
+ case GICD_ISENABLER:
+ return GICD_ISENABLERnE;
+ case GICD_ICENABLER:
+ return GICD_ICENABLERnE;
+ case GICD_ISPENDR:
+ return GICD_ISPENDRnE;
+ case GICD_ICPENDR:
+ return GICD_ICPENDRnE;
+ case GICD_ISACTIVER:
+ return GICD_ISACTIVERnE;
+ case GICD_ICACTIVER:
+ return GICD_ICACTIVERnE;
+ case GICD_IPRIORITYR:
+ return GICD_IPRIORITYRnE;
+ case GICD_ICFGR:
+ return GICD_ICFGRnE;
+ case GICD_IROUTER:
+ return GICD_IROUTERnE;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(1);
+ *index = d->hwirq;
+ return offset;
+}
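+
+/*
+ * For example, an ESPI at hwirq ESPI_BASE_INTID + 5 accessed through
+ * GICD_ISENABLER is redirected to GICD_ISENABLERnE with *index = 5, while
+ * SGI/PPI/SPI accesses keep the original register offset and use the raw
+ * INTID as the index.
+ */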
+
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+ void __iomem *base;
+ u32 index, mask;
+
+ offset = convert_offset_index(d, offset, &index);
+ mask = 1 << (index % 32);
+
+ if (gic_irq_in_rdist(d))
+ base = gic_data_rdist_sgi_base();
+ else
+ base = gic_dist_base_alias(d);
+
+ return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
+}
+
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
+ void __iomem *base;
+ u32 index, mask;
+
+ offset = convert_offset_index(d, offset, &index);
+ mask = 1 << (index % 32);
+
+ if (gic_irq_in_rdist(d))
+ base = gic_data_rdist_sgi_base();
+ else
+ base = gic_data.dist_base;
+
+ writel_relaxed(mask, base + offset + (index / 32) * 4);
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GICD_ICENABLER);
+ if (gic_irq_in_rdist(d))
+ gic_redist_wait_for_rwp();
+ else
+ gic_dist_wait_for_rwp();
+}
+
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+ gic_mask_irq(d);
+ /*
+ * When masking a forwarded interrupt, make sure it is
+ * deactivated as well.
+ *
+ * This ensures that an interrupt that is getting
+ * disabled/masked will not get "stuck", because there is
+ * no one to deactivate it (guest is being terminated).
+ */
+ if (irqd_is_forwarded_to_vcpu(d))
+ gic_poke_irq(d, GICD_ICACTIVER);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GICD_ISENABLER);
+}
+
+static inline bool gic_supports_nmi(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+ static_branch_likely(&supports_pseudo_nmis);
+}
+
+static int gic_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool val)
+{
+ u32 reg;
+
+ if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
+ return -EINVAL;
+
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+ reg = val ? GICD_ISPENDR : GICD_ICPENDR;
+ break;
+
+ case IRQCHIP_STATE_ACTIVE:
+ reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
+ break;
+
+ case IRQCHIP_STATE_MASKED:
+ if (val) {
+ gic_mask_irq(d);
+ return 0;
+ }
+ reg = GICD_ISENABLER;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ gic_poke_irq(d, reg);
+ return 0;
+}
+
+static int gic_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool *val)
+{
+ if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
+ return -EINVAL;
+
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+ *val = gic_peek_irq(d, GICD_ISPENDR);
+ break;
+
+ case IRQCHIP_STATE_ACTIVE:
+ *val = gic_peek_irq(d, GICD_ISACTIVER);
+ break;
+
+ case IRQCHIP_STATE_MASKED:
+ *val = !gic_peek_irq(d, GICD_ISENABLER);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void gic_irq_set_prio(struct irq_data *d, u8 prio)
+{
+ void __iomem *base = gic_dist_base(d);
+ u32 offset, index;
+
+ offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
+
+ writeb_relaxed(prio, base + offset + index);
+}
+
+static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
+{
+ switch (__get_intid_range(hwirq)) {
+ case PPI_RANGE:
+ return hwirq - 16;
+ case EPPI_RANGE:
+ return hwirq - EPPI_BASE_INTID + 16;
+ default:
+ unreachable();
+ }
+}
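+
+/*
+ * For example, PPI INTID 27 maps to index 11 and EPPI_BASE_INTID + 3 maps
+ * to index 19, so regular and extended PPIs share a single contiguous
+ * per-PPI index space (e.g. ppi_nmi_refs[]).
+ */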
+
+static u32 gic_get_ppi_index(struct irq_data *d)
+{
+ return __gic_get_ppi_index(d->hwirq);
+}
+
+static int gic_irq_nmi_setup(struct irq_data *d)
+{
+ struct irq_desc *desc = irq_to_desc(d->irq);
+
+ if (!gic_supports_nmi())
+ return -EINVAL;
+
+ if (gic_peek_irq(d, GICD_ISENABLER)) {
+ pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
+ return -EINVAL;
+ }
+
+ /*
+ * A secondary irq_chip should be in charge of LPI requests;
+ * it should not be possible to get here.
+ */
+ if (WARN_ON(gic_irq(d) >= 8192))
+ return -EINVAL;
+
+ /* desc lock should already be held */
+ if (gic_irq_in_rdist(d)) {
+ u32 idx = gic_get_ppi_index(d);
+
+ /* Setting up PPI as NMI, only switch handler for first NMI */
+ if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
+ refcount_set(&ppi_nmi_refs[idx], 1);
+ desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
+ }
+ } else {
+ desc->handle_irq = handle_fasteoi_nmi;
+ }
+
+ gic_irq_set_prio(d, GICD_INT_NMI_PRI);
+
+ return 0;
+}
+
+static void gic_irq_nmi_teardown(struct irq_data *d)
+{
+ struct irq_desc *desc = irq_to_desc(d->irq);
+
+ if (WARN_ON(!gic_supports_nmi()))
+ return;
+
+ if (gic_peek_irq(d, GICD_ISENABLER)) {
+ pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
+ return;
+ }
+
+ /*
+ * A secondary irq_chip should be in charge of LPI requests;
+ * it should not be possible to get here.
+ */
+ if (WARN_ON(gic_irq(d) >= 8192))
+ return;
+
+ /* desc lock should already be held */
+ if (gic_irq_in_rdist(d)) {
+ u32 idx = gic_get_ppi_index(d);
+
+ /* Tearing down NMI, only switch handler for last NMI */
+ if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
+ desc->handle_irq = handle_percpu_devid_irq;
+ } else {
+ desc->handle_irq = handle_fasteoi_irq;
+ }
+
+ gic_irq_set_prio(d, GICD_INT_DEF_PRI);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+ write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
+ isb();
+}
+
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+ /*
+ * No need to deactivate an LPI, or an interrupt that
+ * is getting forwarded to a vcpu.
+ */
+ if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
+ return;
+ gic_write_dir(gic_irq(d));
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+ enum gic_intid_range range;
+ unsigned int irq = gic_irq(d);
+ void __iomem *base;
+ u32 offset, index;
+ int ret;
+
+ range = get_intid_range(d);
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (range == SGI_RANGE)
+ return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
+
+ /* SPIs have restrictions on the supported types */
+ if ((range == SPI_RANGE || range == ESPI_RANGE) &&
+ type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ if (gic_irq_in_rdist(d))
+ base = gic_data_rdist_sgi_base();
+ else
+ base = gic_dist_base_alias(d);
+
+ offset = convert_offset_index(d, GICD_ICFGR, &index);
+
+ ret = gic_configure_irq(index, type, base + offset, NULL);
+ if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
+ /* Misconfigured PPIs are usually not fatal */
+ pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+ if (get_intid_range(d) == SGI_RANGE)
+ return -EINVAL;
+
+ if (vcpu)
+ irqd_set_forwarded_to_vcpu(d);
+ else
+ irqd_clr_forwarded_to_vcpu(d);
+ return 0;
+}
+
+static u64 gic_mpidr_to_affinity(unsigned long mpidr)
+{
+ u64 aff;
+
+ aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+ return aff;
+}
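+
+/*
+ * Example (hypothetical MPIDR): Aff3/Aff2/Aff1/Aff0 = 0/1/2/3 packs into
+ * 0x10203, i.e. Aff0..Aff2 in bits [23:0] and Aff3 in bits [39:32], which
+ * is the layout expected by GICD_IROUTER.
+ */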
+
+static void gic_deactivate_unhandled(u32 irqnr)
+{
+ if (static_branch_likely(&supports_deactivate_key)) {
+ if (irqnr < 8192)
+ gic_write_dir(irqnr);
+ } else {
+ write_gicreg(irqnr, ICC_EOIR1_EL1);
+ isb();
+ }
+}
+
+/*
+ * Follow a read of the IAR with any HW maintenance that needs to happen prior
+ * to invoking the relevant IRQ handler. We must do two things:
+ *
+ * (1) Ensure instruction ordering between a read of IAR and subsequent
+ * instructions in the IRQ handler using an ISB.
+ *
+ * It is possible for the IAR to report an IRQ which was signalled *after*
+ * the CPU took an IRQ exception as multiple interrupts can race to be
+ * recognized by the GIC, earlier interrupts could be withdrawn, and/or
+ * later interrupts could be prioritized by the GIC.
+ *
+ * For devices which are tightly coupled to the CPU, such as PMUs, a
+ * context synchronization event is necessary to ensure that system
+ * register state is not stale, as these may have been indirectly written
+ * *after* exception entry.
+ *
+ * (2) Deactivate the interrupt when EOI mode 1 is in use.
+ */
+static inline void gic_complete_ack(u32 irqnr)
+{
+ if (static_branch_likely(&supports_deactivate_key))
+ write_gicreg(irqnr, ICC_EOIR1_EL1);
+
+ isb();
+}
+
+static bool gic_rpr_is_nmi_prio(void)
+{
+ if (!gic_supports_nmi())
+ return false;
+
+ return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
+}
+
+static bool gic_irqnr_is_special(u32 irqnr)
+{
+ return irqnr >= 1020 && irqnr <= 1023;
+}
+
+static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
+{
+ if (gic_irqnr_is_special(irqnr))
+ return;
+
+ gic_complete_ack(irqnr);
+
+ if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
+ WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
+ gic_deactivate_unhandled(irqnr);
+ }
+}
+
+static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+{
+ if (gic_irqnr_is_special(irqnr))
+ return;
+
+ gic_complete_ack(irqnr);
+
+ if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
+ WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
+ gic_deactivate_unhandled(irqnr);
+ }
+}
+
+/*
+ * An exception has been taken from a context with IRQs enabled, and this could
+ * be an IRQ or an NMI.
+ *
+ * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
+ * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
+ * after handling any NMI but before handling any IRQ.
+ *
+ * The entry code has performed IRQ entry, and if an NMI is detected we must
+ * perform NMI entry/exit around invoking the handler.
+ */
+static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
+{
+ bool is_nmi;
+ u32 irqnr;
+
+ irqnr = gic_read_iar();
+
+ is_nmi = gic_rpr_is_nmi_prio();
+
+ if (is_nmi) {
+ nmi_enter();
+ __gic_handle_nmi(irqnr, regs);
+ nmi_exit();
+ }
+
+ if (gic_prio_masking_enabled()) {
+ gic_pmr_mask_irqs();
+ gic_arch_enable_irqs();
+ }
+
+ if (!is_nmi)
+ __gic_handle_irq(irqnr, regs);
+}
+
+/*
+ * An exception has been taken from a context with IRQs disabled, which can only
+ * be an NMI.
+ *
+ * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
+ * DAIF.IF (and ICC_PMR_EL1) unchanged.
+ *
+ * The entry code has performed NMI entry.
+ */
+static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
+{
+ u64 pmr;
+ u32 irqnr;
+
+ /*
+ * We were in a context with IRQs disabled. However, the
+ * entry code has set PMR to a value that allows any
+ * interrupt to be acknowledged, and not just NMIs. This can
+ * lead to surprising effects if the NMI has been retired in
+ * the meantime, and an IRQ is pending. The IRQ
+ * would then be taken in NMI context, something that nobody
+ * wants to debug twice.
+ *
+ * Until we sort this, drop PMR again to a level that will
+ * actually only allow NMIs before reading IAR, and then
+ * restore it to what it was.
+ */
+ pmr = gic_read_pmr();
+ gic_pmr_mask_irqs();
+ isb();
+ irqnr = gic_read_iar();
+ gic_write_pmr(pmr);
+
+ __gic_handle_nmi(irqnr, regs);
+}
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+ if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
+ __gic_handle_irq_from_irqsoff(regs);
+ else
+ __gic_handle_irq_from_irqson(regs);
+}
+
+static u32 gic_get_pribits(void)
+{
+ u32 pribits;
+
+ pribits = gic_read_ctlr();
+ pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
+ pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
+ pribits++;
+
+ return pribits;
+}
+
+static bool gic_has_group0(void)
+{
+ u32 val;
+ u32 old_pmr;
+
+ old_pmr = gic_read_pmr();
+
+ /*
+ * Let's find out if Group0 is under control of EL3 or not by
+ * setting the highest possible, non-zero priority in PMR.
+ *
+ * If SCR_EL3.FIQ is set, the priority gets shifted down in
+ * order for the CPU interface to set bit 7, and keep the
+ * actual priority in the non-secure range. In the process, it
+ * loses the least significant bit and the actual priority
+ * becomes 0x80. Reading it back returns 0, indicating that
+ * we don't have access to Group0.
+ */
+ gic_write_pmr(BIT(8 - gic_get_pribits()));
+ val = gic_read_pmr();
+
+ gic_write_pmr(old_pmr);
+
+ return val != 0;
+}
+
+static void __init gic_dist_init(void)
+{
+ unsigned int i;
+ u64 affinity;
+ void __iomem *base = gic_data.dist_base;
+ u32 val;
+
+ /* Disable the distributor */
+ writel_relaxed(0, base + GICD_CTLR);
+ gic_dist_wait_for_rwp();
+
+ /*
+ * Configure SPIs as non-secure Group-1. This will only matter
+ * if the GIC only has a single security state. This will not
+ * do the right thing if the kernel is running in secure mode,
+ * but that's not the intended use case anyway.
+ */
+ for (i = 32; i < GIC_LINE_NR; i += 32)
+ writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
+
+ /* Extended SPI range, not handled by the GICv2/GICv3 common code */
+ for (i = 0; i < GIC_ESPI_NR; i += 32) {
+ writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
+ writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
+ }
+
+ for (i = 0; i < GIC_ESPI_NR; i += 32)
+ writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
+
+ for (i = 0; i < GIC_ESPI_NR; i += 16)
+ writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
+
+ for (i = 0; i < GIC_ESPI_NR; i += 4)
+ writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
+
+ /* Now do the common stuff */
+ gic_dist_config(base, GIC_LINE_NR, NULL);
+
+ val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+ if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+ pr_info("Enabling SGIs without active state\n");
+ val |= GICD_CTLR_nASSGIreq;
+ }
+
+ /* Enable distributor with ARE, Group1, and wait for it to drain */
+ writel_relaxed(val, base + GICD_CTLR);
+ gic_dist_wait_for_rwp();
+
+ /*
+ * Set all global interrupts to the boot CPU only. ARE must be
+ * enabled.
+ */
+ affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+ for (i = 32; i < GIC_LINE_NR; i++)
+ gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
+
+ for (i = 0; i < GIC_ESPI_NR; i++)
+ gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
+}
+
+static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
+{
+ int ret = -ENODEV;
+ int i;
+
+ for (i = 0; i < gic_data.nr_redist_regions; i++) {
+ void __iomem *ptr = gic_data.redist_regions[i].redist_base;
+ u64 typer;
+ u32 reg;
+
+ reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ if (reg != GIC_PIDR2_ARCH_GICv3 &&
+ reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
+ pr_warn("No redistributor present @%p\n", ptr);
+ break;
+ }
+
+ do {
+ typer = gic_read_typer(ptr + GICR_TYPER);
+ ret = fn(gic_data.redist_regions + i, ptr);
+ if (!ret)
+ return 0;
+
+ if (gic_data.redist_regions[i].single_redist)
+ break;
+
+ if (gic_data.redist_stride) {
+ ptr += gic_data.redist_stride;
+ } else {
+ ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
+ if (typer & GICR_TYPER_VLPIS)
+ ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
+ }
+ } while (!(typer & GICR_TYPER_LAST));
+ }
+
+ return ret ? -ENODEV : 0;
+}
+
+static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
+{
+ unsigned long mpidr = cpu_logical_map(smp_processor_id());
+ u64 typer;
+ u32 aff;
+
+ /*
+ * Convert affinity to a 32bit value that can be matched to
+ * GICR_TYPER bits [63:32].
+ */
+ aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+ typer = gic_read_typer(ptr + GICR_TYPER);
+ if ((typer >> 32) == aff) {
+ u64 offset = ptr - region->redist_base;
+ raw_spin_lock_init(&gic_data_rdist()->rd_lock);
+ gic_data_rdist_rd_base() = ptr;
+ gic_data_rdist()->phys_base = region->phys_base + offset;
+
+ pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
+ smp_processor_id(), mpidr,
+ (int)(region - gic_data.redist_regions),
+ &gic_data_rdist()->phys_base);
+ return 0;
+ }
+
+ /* Try next one */
+ return 1;
+}
+
+static int gic_populate_rdist(void)
+{
+ if (gic_iterate_rdists(__gic_populate_rdist) == 0)
+ return 0;
+
+ /* We couldn't even deal with ourselves... */
+ WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
+ smp_processor_id(),
+ (unsigned long)cpu_logical_map(smp_processor_id()));
+ return -ENODEV;
+}
+
+static int __gic_update_rdist_properties(struct redist_region *region,
+ void __iomem *ptr)
+{
+ u64 typer = gic_read_typer(ptr + GICR_TYPER);
+ u32 ctlr = readl_relaxed(ptr + GICR_CTLR);
+
+ /* Boot-time cleanup */
+ if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
+ u64 val;
+
+ /* Deactivate any present vPE */
+ val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
+ if (val & GICR_VPENDBASER_Valid)
+ gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+ ptr + SZ_128K + GICR_VPENDBASER);
+
+ /* Mark the VPE table as invalid */
+ val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
+ val &= ~GICR_VPROPBASER_4_1_VALID;
+ gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
+ }
+
+ gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
+
+ /*
+ * TYPER.RVPEID implies some form of DirectLPI, no matter what the
+ * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
+ * that the ITS driver can make use of for LPIs (and not VLPIs).
+ *
+ * These are 3 different ways to express the same thing, depending
+ * on the revision of the architecture and its relaxations over
+ * time. Just group them under the 'direct_lpi' banner.
+ */
+ gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
+ gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
+ !!(ctlr & GICR_CTLR_IR) |
+ gic_data.rdists.has_rvpeid);
+ gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
+
+ /* Detect non-sensical configurations */
+ if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
+ gic_data.rdists.has_direct_lpi = false;
+ gic_data.rdists.has_vlpis = false;
+ gic_data.rdists.has_rvpeid = false;
+ }
+
+ gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
+
+ return 1;
+}
+
+static void gic_update_rdist_properties(void)
+{
+ gic_data.ppi_nr = UINT_MAX;
+ gic_iterate_rdists(__gic_update_rdist_properties);
+ if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
+ gic_data.ppi_nr = 0;
+ pr_info("GICv3 features: %d PPIs%s%s\n",
+ gic_data.ppi_nr,
+ gic_data.has_rss ? ", RSS" : "",
+ gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");
+
+ if (gic_data.rdists.has_vlpis)
+ pr_info("GICv4 features: %s%s%s\n",
+ gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
+ gic_data.rdists.has_rvpeid ? "RVPEID " : "",
+ gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
+}
+
+/* Check whether it's single security state view */
+static inline bool gic_dist_security_disabled(void)
+{
+ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
+static void gic_cpu_sys_reg_init(void)
+{
+ int i, cpu = smp_processor_id();
+ u64 mpidr = cpu_logical_map(cpu);
+ u64 need_rss = MPIDR_RS(mpidr);
+ bool group0;
+ u32 pribits;
+
+ /*
+ * Need to check that the SRE bit has actually been set. If
+ * not, it means that SRE is disabled at EL2. We're going to
+ * die painfully, and there is nothing we can do about it.
+ *
+ * Kindly inform the luser.
+ */
+ if (!gic_enable_sre())
+ pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+
+ pribits = gic_get_pribits();
+
+ group0 = gic_has_group0();
+
+ /* Set priority mask register */
+ if (!gic_prio_masking_enabled()) {
+ write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
+ } else if (gic_supports_nmi()) {
+ /*
+ * Mismatched configuration with the boot CPU; the system is likely
+ * to die as interrupt masking will not work properly on all
+ * CPUs
+ *
+ * The boot CPU calls this function before enabling NMI support,
+ * and as a result we'll never see this warning in the boot path
+ * for that CPU.
+ */
+ if (static_branch_unlikely(&gic_nonsecure_priorities))
+ WARN_ON(!group0 || gic_dist_security_disabled());
+ else
+ WARN_ON(group0 && !gic_dist_security_disabled());
+ }
+
+ /*
+ * Some firmwares hand over to the kernel with the BPR changed from
+ * its reset value (and with a value large enough to prevent
+ * any pre-emptive interrupts from working at all). Writing a zero
+ * to BPR restores its reset value.
+ */
+ gic_write_bpr1(0);
+
+ if (static_branch_likely(&supports_deactivate_key)) {
+ /* EOI drops priority only (mode 1) */
+ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
+ } else {
+ /* EOI deactivates interrupt too (mode 0) */
+ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+ }
+
+ /* Always whack Group0 before Group1 */
+ if (group0) {
+ switch (pribits) {
+ case 8:
+ case 7:
+ write_gicreg(0, ICC_AP0R3_EL1);
+ write_gicreg(0, ICC_AP0R2_EL1);
+ fallthrough;
+ case 6:
+ write_gicreg(0, ICC_AP0R1_EL1);
+ fallthrough;
+ case 5:
+ case 4:
+ write_gicreg(0, ICC_AP0R0_EL1);
+ }
+
+ isb();
+ }
+
+ switch (pribits) {
+ case 8:
+ case 7:
+ write_gicreg(0, ICC_AP1R3_EL1);
+ write_gicreg(0, ICC_AP1R2_EL1);
+ fallthrough;
+ case 6:
+ write_gicreg(0, ICC_AP1R1_EL1);
+ fallthrough;
+ case 5:
+ case 4:
+ write_gicreg(0, ICC_AP1R0_EL1);
+ }
+
+ isb();
+
+ /* ... and let's hit the road... */
+ gic_write_grpen1(1);
+
+ /* Keep the RSS capability status in per_cpu variable */
+ per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
+
+ /* Check that all the CPUs are capable of sending SGIs to other CPUs */
+ for_each_online_cpu(i) {
+ bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
+
+ need_rss |= MPIDR_RS(cpu_logical_map(i));
+ if (need_rss && (!have_rss))
+ pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
+ cpu, (unsigned long)mpidr,
+ i, (unsigned long)cpu_logical_map(i));
+ }
+
+ /*
+ * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
+ * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
+ * UNPREDICTABLE choice of :
+ * - The write is ignored.
+ * - The RS field is treated as 0.
+ */
+ if (need_rss && (!gic_data.has_rss))
+ pr_crit_once("RSS is required but GICD doesn't support it\n");
+}
+
+static bool gicv3_nolpi;
+
+static int __init gicv3_nolpi_cfg(char *buf)
+{
+ return strtobool(buf, &gicv3_nolpi);
+}
+early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
+
+static int gic_dist_supports_lpis(void)
+{
+ return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
+ !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
+ !gicv3_nolpi);
+}
+
+static void gic_cpu_init(void)
+{
+ void __iomem *rbase;
+ int i;
+
+ /* Register ourselves with the rest of the world */
+ if (gic_populate_rdist())
+ return;
+
+ gic_enable_redist(true);
+
+ WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
+ !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
+ "Distributor has extended ranges, but CPU%d doesn't\n",
+ smp_processor_id());
+
+ rbase = gic_data_rdist_sgi_base();
+
+ /* Configure SGIs/PPIs as non-secure Group-1 */
+ for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
+ writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
+
+ gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
+
+ /* initialise system registers */
+ gic_cpu_sys_reg_init();
+}
+
+#ifdef CONFIG_SMP
+
+#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
+#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
+
+static int gic_starting_cpu(unsigned int cpu)
+{
+ gic_cpu_init();
+
+ if (gic_dist_supports_lpis())
+ its_cpu_init();
+
+ return 0;
+}
+
+static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
+ unsigned long cluster_id)
+{
+ int next_cpu, cpu = *base_cpu;
+ unsigned long mpidr = cpu_logical_map(cpu);
+ u16 tlist = 0;
+
+ while (cpu < nr_cpu_ids) {
+ tlist |= 1 << (mpidr & 0xf);
+
+ next_cpu = cpumask_next(cpu, mask);
+ if (next_cpu >= nr_cpu_ids)
+ goto out;
+ cpu = next_cpu;
+
+ mpidr = cpu_logical_map(cpu);
+
+ if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
+ cpu--;
+ goto out;
+ }
+ }
+out:
+ *base_cpu = cpu;
+ return tlist;
+}
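+
+/*
+ * Example (hypothetical topology): for CPUs 0-3 sharing one cluster with
+ * Aff0 values 0-3, a mask covering all four yields tlist = 0xf, and
+ * *base_cpu is adjusted so that the caller's for_each_cpu() loop resumes
+ * at the first CPU of the next cluster.
+ */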
+
+#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
+ (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
+ << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
+
+static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
+{
+ u64 val;
+
+ val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
+ MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
+ irq << ICC_SGI1R_SGI_ID_SHIFT |
+ MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
+ MPIDR_TO_SGI_RS(cluster_id) |
+ tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
+
+ pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+ gic_write_sgi1r(val);
+}
+
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
+{
+ int cpu;
+
+ if (WARN_ON(d->hwirq >= 16))
+ return;
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb(ishst);
+
+ for_each_cpu(cpu, mask) {
+ u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
+ u16 tlist;
+
+ tlist = gic_compute_target_list(&cpu, mask, cluster_id);
+ gic_send_sgi(cluster_id, tlist, d->hwirq);
+ }
+
+ /* Force the above writes to ICC_SGI1R_EL1 to be executed */
+ isb();
+}
+
+static void __init gic_smp_init(void)
+{
+ struct irq_fwspec sgi_fwspec = {
+ .fwnode = gic_data.fwnode,
+ .param_count = 1,
+ };
+ int base_sgi;
+
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+ "irqchip/arm/gicv3:starting",
+ gic_starting_cpu, NULL);
+
+ /* Register all 8 non-secure SGIs */
+ base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
+ NUMA_NO_NODE, &sgi_fwspec,
+ false, NULL);
+ if (WARN_ON(base_sgi <= 0))
+ return;
+
+ set_smp_ipi_range(base_sgi, 8);
+}
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ unsigned int cpu;
+ u32 offset, index;
+ void __iomem *reg;
+ int enabled;
+ u64 val;
+
+ if (force)
+ cpu = cpumask_first(mask_val);
+ else
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (gic_irq_in_rdist(d))
+ return -EINVAL;
+
+ /* If interrupt was enabled, disable it first */
+ enabled = gic_peek_irq(d, GICD_ISENABLER);
+ if (enabled)
+ gic_mask_irq(d);
+
+ offset = convert_offset_index(d, GICD_IROUTER, &index);
+ reg = gic_dist_base(d) + offset + (index * 8);
+ val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+
+ gic_write_irouter(val, reg);
+
+ /*
+ * If the interrupt was enabled, enable it again. Otherwise,
+ * just wait for the distributor to have digested our changes.
+ */
+ if (enabled)
+ gic_unmask_irq(d);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#else
+#define gic_set_affinity NULL
+#define gic_ipi_send_mask NULL
+#define gic_smp_init() do { } while (0)
+#endif
+
+static int gic_retrigger(struct irq_data *data)
+{
+ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
+}
+
+#ifdef CONFIG_CPU_PM
+static int gic_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ if (cmd == CPU_PM_EXIT) {
+ if (gic_dist_security_disabled())
+ gic_enable_redist(true);
+ gic_cpu_sys_reg_init();
+ } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
+ gic_write_grpen1(0);
+ gic_enable_redist(false);
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gic_cpu_pm_notifier_block = {
+ .notifier_call = gic_cpu_pm_notifier,
+};
+
+static void gic_cpu_pm_init(void)
+{
+ cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
+}
+
+#else
+static inline void gic_cpu_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+static struct irq_chip gic_chip = {
+ .name = "GICv3",
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_set_affinity = gic_set_affinity,
+ .irq_retrigger = gic_retrigger,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_nmi_setup = gic_irq_nmi_setup,
+ .irq_nmi_teardown = gic_irq_nmi_teardown,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static struct irq_chip gic_eoimode1_chip = {
+ .name = "GICv3",
+ .irq_mask = gic_eoimode1_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoimode1_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_set_affinity = gic_set_affinity,
+ .irq_retrigger = gic_retrigger,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
+ .irq_nmi_setup = gic_irq_nmi_setup,
+ .irq_nmi_teardown = gic_irq_nmi_teardown,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip = &gic_chip;
+ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
+
+ if (static_branch_likely(&supports_deactivate_key))
+ chip = &gic_eoimode1_chip;
+
+ switch (__get_intid_range(hw)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+ break;
+
+ case SPI_RANGE:
+ case ESPI_RANGE:
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
+ irq_set_probe(irq);
+ irqd_set_single_target(irqd);
+ break;
+
+ case LPI_RANGE:
+ if (!gic_dist_supports_lpis())
+ return -EPERM;
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
+ break;
+
+ default:
+ return -EPERM;
+ }
+
+ /* Prevents SW retriggers which mess up the ACK/EOI ordering */
+ irqd_set_handle_enforce_irqctx(irqd);
+ return 0;
+}
+
+static int gic_irq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
+ *hwirq = fwspec->param[0];
+ *type = IRQ_TYPE_EDGE_RISING;
+ return 0;
+ }
+
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count < 3)
+ return -EINVAL;
+
+ switch (fwspec->param[0]) {
+ case 0: /* SPI */
+ *hwirq = fwspec->param[1] + 32;
+ break;
+ case 1: /* PPI */
+ *hwirq = fwspec->param[1] + 16;
+ break;
+ case 2: /* ESPI */
+ *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
+ break;
+ case 3: /* EPPI */
+ *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
+ break;
+ case GIC_IRQ_TYPE_LPI: /* LPI */
+ *hwirq = fwspec->param[1];
+ break;
+ case GIC_IRQ_TYPE_PARTITION:
+ *hwirq = fwspec->param[1];
+ if (fwspec->param[1] >= 16)
+ *hwirq += EPPI_BASE_INTID - 16;
+ else
+ *hwirq += 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+ /*
+ * Make it clear that broken DTs are... broken.
+ * Partitioned PPIs are an unfortunate exception.
+ */
+ WARN_ON(*type == IRQ_TYPE_NONE &&
+ fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
+ return 0;
+ }
+
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ if (fwspec->param[0] < 16) {
+ pr_err(FW_BUG "Illegal GSI%d translation request\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = arg;
+
+ ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
+ irq_hw_number_t hwirq)
+{
+ enum gic_intid_range range;
+
+ if (!gic_data.ppi_descs)
+ return false;
+
+ if (!is_of_node(fwspec->fwnode))
+ return false;
+
+ if (fwspec->param_count < 4 || !fwspec->param[3])
+ return false;
+
+ range = __get_intid_range(hwirq);
+ if (range != PPI_RANGE && range != EPPI_RANGE)
+ return false;
+
+ return true;
+}
+
+static int gic_irq_domain_select(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ unsigned int type, ret, ppi_idx;
+ irq_hw_number_t hwirq;
+
+ /* Not for us */
+ if (fwspec->fwnode != d->fwnode)
+ return 0;
+
+ /* If this is not DT, then we have a single domain */
+ if (!is_of_node(fwspec->fwnode))
+ return 1;
+
+ ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (WARN_ON_ONCE(ret))
+ return 0;
+
+ if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
+ return d == gic_data.domain;
+
+ /*
+ * If this is a PPI and we have a 4th (non-null) parameter,
+ * then we need to match the partition domain.
+ */
+ ppi_idx = __gic_get_ppi_index(hwirq);
+ return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+ .translate = gic_irq_domain_translate,
+ .alloc = gic_irq_domain_alloc,
+ .free = gic_irq_domain_free,
+ .select = gic_irq_domain_select,
+};
+
+static int partition_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ unsigned long ppi_intid;
+ struct device_node *np;
+ unsigned int ppi_idx;
+ int ret;
+
+ if (!gic_data.ppi_descs)
+ return -ENOMEM;
+
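+ /* The 4th interrupt cell is a phandle to the ppi-partition node */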
+ np = of_find_node_by_phandle(fwspec->param[3]);
+ if (WARN_ON(!np))
+ return -EINVAL;
+
+ ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
+ if (WARN_ON_ONCE(ret))
+ return 0;
+
+ ppi_idx = __gic_get_ppi_index(ppi_intid);
+ ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
+ of_node_to_fwnode(np));
+ if (ret < 0)
+ return ret;
+
+ *hwirq = ret;
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static const struct irq_domain_ops partition_domain_ops = {
+ .translate = partition_domain_translate,
+ .select = gic_irq_domain_select,
+};
+
+static bool gic_enable_quirk_msm8996(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
+
+ return true;
+}
+
+static bool gic_enable_quirk_mtk_gicr(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE;
+
+ return true;
+}
+
+static bool gic_enable_quirk_cavium_38539(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
+
+ return true;
+}
+
+static bool gic_enable_quirk_hip06_07(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ /*
+ * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
+ * not being an actual ARM implementation). The saving grace is
+ * that GIC-600 doesn't have ESPI, so nothing to do in that case.
+ * HIP07 doesn't even have a proper IIDR, and still pretends to
+ * have ESPI. In both cases, put them right.
+ */
+ if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
+ /* Zero both ESPI and the RES0 field next to it... */
+ d->rdists.gicd_typer &= ~GENMASK(9, 8);
+ return true;
+ }
+
+ return false;
+}
+
+#define T241_CHIPN_MASK GENMASK_ULL(45, 44)
+#define T241_CHIP_GICDA_OFFSET 0x1580000
+#define SMCCC_SOC_ID_T241 0x036b0241
+
+static bool gic_enable_quirk_nvidia_t241(void *data)
+{
+ s32 soc_id = arm_smccc_get_soc_id_version();
+ unsigned long chip_bmask = 0;
+ phys_addr_t phys;
+ u32 i;
+
+ /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */
+ if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241))
+ return false;
+
+ /* Find the chips based on GICR regions PHYS addr */
+ for (i = 0; i < gic_data.nr_redist_regions; i++) {
+ chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK,
+ (u64)gic_data.redist_regions[i].phys_base));
+ }
+
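+ /* Nothing to do unless at least three chips are present */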
+ if (hweight32(chip_bmask) < 3)
+ return false;
+
+ /* Setup GICD alias regions */
+ for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) {
+ if (chip_bmask & BIT(i)) {
+ phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET;
+ phys |= FIELD_PREP(T241_CHIPN_MASK, i);
+ t241_dist_base_alias[i] = ioremap(phys, SZ_64K);
+ WARN_ON_ONCE(!t241_dist_base_alias[i]);
+ }
+ }
+ static_branch_enable(&gic_nvidia_t241_erratum);
+ return true;
+}
+
+static const struct gic_quirk gic_quirks[] = {
+ {
+ .desc = "GICv3: Qualcomm MSM8996 broken firmware",
+ .compatible = "qcom,msm8996-gic-v3",
+ .init = gic_enable_quirk_msm8996,
+ },
+ {
+ .desc = "GICv3: Mediatek Chromebook GICR save problem",
+ .property = "mediatek,broken-save-restore-fw",
+ .init = gic_enable_quirk_mtk_gicr,
+ },
+ {
+ .desc = "GICv3: HIP06 erratum 161010803",
+ .iidr = 0x0204043b,
+ .mask = 0xffffffff,
+ .init = gic_enable_quirk_hip06_07,
+ },
+ {
+ .desc = "GICv3: HIP07 erratum 161010803",
+ .iidr = 0x00000000,
+ .mask = 0xffffffff,
+ .init = gic_enable_quirk_hip06_07,
+ },
+ {
+ /*
+ * Reserved register accesses generate a Synchronous
+ * External Abort. This erratum applies to:
+ * - ThunderX: CN88xx
+ * - OCTEON TX: CN83xx, CN81xx
+ * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
+ */
+ .desc = "GICv3: Cavium erratum 38539",
+ .iidr = 0xa000034c,
+ .mask = 0xe8f00fff,
+ .init = gic_enable_quirk_cavium_38539,
+ },
+ {
+ .desc = "GICv3: NVIDIA erratum T241-FABRIC-4",
+ .iidr = 0x0402043b,
+ .mask = 0xffffffff,
+ .init = gic_enable_quirk_nvidia_t241,
+ },
+ {
+ }
+};
+
+static void gic_enable_nmi_support(void)
+{
+ int i;
+
+ if (!gic_prio_masking_enabled())
+ return;
+
+ if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) {
+ pr_warn("Skipping NMI enable due to firmware issues\n");
+ return;
+ }
+
+ ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
+ if (!ppi_nmi_refs)
+ return;
+
+ for (i = 0; i < gic_data.ppi_nr; i++)
+ refcount_set(&ppi_nmi_refs[i], 0);
+
+ /*
+ * Linux itself doesn't use 1:N distribution, so has no need to
+ * set PMHE. The only reason to have it set is if EL3 requires it
+ * (and we can't change it).
+ */
+ if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
+ static_branch_enable(&gic_pmr_sync);
+
+ pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
+ static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
+
+ /*
+ * How priority values are used by the GIC depends on two things:
+ * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
+ * and if Group 0 interrupts can be delivered to Linux in the non-secure
+ * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
+ * ICC_PMR_EL1 register and the priority that software assigns to
+ * interrupts:
+ *
+ * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
+ * -----------------------------------------------------------
+ * 1 | - | unchanged | unchanged
+ * -----------------------------------------------------------
+ * 0 | 1 | non-secure | non-secure
+ * -----------------------------------------------------------
+ * 0 | 0 | unchanged | non-secure
+ *
+ * where non-secure means that the value is right-shifted by one and the
+ * MSB set, so that it fits in the non-secure priority range.
+ *
+ * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
+ * are both either modified or unchanged, we can use the same set of
+ * priorities.
+ *
+ * In the last case, where only the interrupt priorities are modified to
+ * be in the non-secure range, we use a different PMR value to mask IRQs
+ * and the rest of the values that we use remain unchanged.
+ */
+ if (gic_has_group0() && !gic_dist_security_disabled())
+ static_branch_enable(&gic_nonsecure_priorities);
+
+ static_branch_enable(&supports_pseudo_nmis);
+
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
+ else
+ gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
+}
+
+static int __init gic_init_bases(phys_addr_t dist_phys_base,
+ void __iomem *dist_base,
+ struct redist_region *rdist_regs,
+ u32 nr_redist_regions,
+ u64 redist_stride,
+ struct fwnode_handle *handle)
+{
+ u32 typer;
+ int err;
+
+ if (!is_hyp_mode_available())
+ static_branch_disable(&supports_deactivate_key);
+
+ if (static_branch_likely(&supports_deactivate_key))
+ pr_info("GIC: Using split EOI/Deactivate mode\n");
+
+ gic_data.fwnode = handle;
+ gic_data.dist_phys_base = dist_phys_base;
+ gic_data.dist_base = dist_base;
+ gic_data.redist_regions = rdist_regs;
+ gic_data.nr_redist_regions = nr_redist_regions;
+ gic_data.redist_stride = redist_stride;
+
+ /*
+ * Find out how many interrupts are supported.
+ */
+ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
+ gic_data.rdists.gicd_typer = typer;
+
+ gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
+ gic_quirks, &gic_data);
+
+ pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
+ pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
+
+ /*
+ * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
+ * architecture spec (which says that reserved registers are RES0).
+ */
+ if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
+ gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
+
+ gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
+ &gic_data);
+ gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+ if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) {
+ /* GICv4.x features stay disabled when the T241-FABRIC-4 erratum is present */
+ gic_data.rdists.has_rvpeid = true;
+ gic_data.rdists.has_vlpis = true;
+ gic_data.rdists.has_direct_lpi = true;
+ gic_data.rdists.has_vpend_valid_dirty = true;
+ }
+
+ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
+
+ gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
+
+ if (typer & GICD_TYPER_MBIS) {
+ err = mbi_init(handle, gic_data.domain);
+ if (err)
+ pr_err("Failed to initialize MBIs\n");
+ }
+
+ set_handle_irq(gic_handle_irq);
+
+ gic_update_rdist_properties();
+
+ gic_dist_init();
+ gic_cpu_init();
+ gic_smp_init();
+ gic_cpu_pm_init();
+
+ if (gic_dist_supports_lpis()) {
+ its_init(handle, &gic_data.rdists, gic_data.domain);
+ its_cpu_init();
+ its_lpi_memreserve_init();
+ } else {
+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
+ gicv2m_init(handle, gic_data.domain);
+ }
+
+ gic_enable_nmi_support();
+
+ return 0;
+
+out_free:
+ if (gic_data.domain)
+ irq_domain_remove(gic_data.domain);
+ free_percpu(gic_data.rdists.rdist);
+ return err;
+}
+
+static int __init gic_validate_dist_version(void __iomem *dist_base)
+{
+ u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+
+ if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* Create all possible partitions at boot time */
+static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
+{
+ struct device_node *parts_node, *child_part;
+ int part_idx = 0, i;
+ int nr_parts;
+ struct partition_affinity *parts;
+
+ parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
+ if (!parts_node)
+ return;
+
+ gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
+ if (!gic_data.ppi_descs)
+ goto out_put_node;
+
+ nr_parts = of_get_child_count(parts_node);
+
+ if (!nr_parts)
+ goto out_put_node;
+
+ parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
+ if (WARN_ON(!parts))
+ goto out_put_node;
+
+ for_each_child_of_node(parts_node, child_part) {
+ struct partition_affinity *part;
+ int n;
+
+ part = &parts[part_idx];
+
+ part->partition_id = of_node_to_fwnode(child_part);
+
+ pr_info("GIC: PPI partition %pOFn[%d] { ",
+ child_part, part_idx);
+
+ n = of_property_count_elems_of_size(child_part, "affinity",
+ sizeof(u32));
+ WARN_ON(n <= 0);
+
+ for (i = 0; i < n; i++) {
+ int err, cpu;
+ u32 cpu_phandle;
+ struct device_node *cpu_node;
+
+ err = of_property_read_u32_index(child_part, "affinity",
+ i, &cpu_phandle);
+ if (WARN_ON(err))
+ continue;
+
+ cpu_node = of_find_node_by_phandle(cpu_phandle);
+ if (WARN_ON(!cpu_node))
+ continue;
+
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (WARN_ON(cpu < 0)) {
+ of_node_put(cpu_node);
+ continue;
+ }
+
+ pr_cont("%pOF[%d] ", cpu_node, cpu);
+
+ cpumask_set_cpu(cpu, &part->mask);
+ of_node_put(cpu_node);
+ }
+
+ pr_cont("}\n");
+ part_idx++;
+ }
+
+ for (i = 0; i < gic_data.ppi_nr; i++) {
+ unsigned int irq;
+ struct partition_desc *desc;
+ struct irq_fwspec ppi_fwspec = {
+ .fwnode = gic_data.fwnode,
+ .param_count = 3,
+ .param = {
+ [0] = GIC_IRQ_TYPE_PARTITION,
+ [1] = i,
+ [2] = IRQ_TYPE_NONE,
+ },
+ };
+
+ irq = irq_create_fwspec_mapping(&ppi_fwspec);
+ if (WARN_ON(!irq))
+ continue;
+ desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
+ irq, &partition_domain_ops);
+ if (WARN_ON(!desc))
+ continue;
+
+ gic_data.ppi_descs[i] = desc;
+ }
+
+out_put_node:
+ of_node_put(parts_node);
+}
+
+static void __init gic_of_setup_kvm_info(struct device_node *node)
+{
+ int ret;
+ struct resource r;
+ u32 gicv_idx;
+
+ gic_v3_kvm_info.type = GIC_V3;
+
+ gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
+ if (!gic_v3_kvm_info.maint_irq)
+ return;
+
+ if (of_property_read_u32(node, "#redistributor-regions",
+ &gicv_idx))
+ gicv_idx = 1;
+
+ gicv_idx += 3; /* Also skip GICD, GICC, GICH */
+ ret = of_address_to_resource(node, gicv_idx, &r);
+ if (!ret)
+ gic_v3_kvm_info.vcpu = r;
+
+ gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+ gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
+ vgic_set_kvm_info(&gic_v3_kvm_info);
+}
+
+static void gic_request_region(resource_size_t base, resource_size_t size,
+ const char *name)
+{
+ if (!request_mem_region(base, size, name))
+ pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
+ name, &base);
+}
+
+static void __iomem *gic_of_iomap(struct device_node *node, int idx,
+ const char *name, struct resource *res)
+{
+ void __iomem *base;
+ int ret;
+
+ ret = of_address_to_resource(node, idx, res);
+ if (ret)
+ return IOMEM_ERR_PTR(ret);
+
+ gic_request_region(res->start, resource_size(res), name);
+ base = of_iomap(node, idx);
+
+ return base ?: IOMEM_ERR_PTR(-ENOMEM);
+}
+
+static int __init gic_of_init(struct device_node *node, struct device_node *parent)
+{
+ phys_addr_t dist_phys_base;
+ void __iomem *dist_base;
+ struct redist_region *rdist_regs;
+ struct resource res;
+ u64 redist_stride;
+ u32 nr_redist_regions;
+ int err, i;
+
+ dist_base = gic_of_iomap(node, 0, "GICD", &res);
+ if (IS_ERR(dist_base)) {
+ pr_err("%pOF: unable to map gic dist registers\n", node);
+ return PTR_ERR(dist_base);
+ }
+
+ dist_phys_base = res.start;
+
+ err = gic_validate_dist_version(dist_base);
+ if (err) {
+ pr_err("%pOF: no distributor detected, giving up\n", node);
+ goto out_unmap_dist;
+ }
+
+ if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
+ nr_redist_regions = 1;
+
+ rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
+ GFP_KERNEL);
+ if (!rdist_regs) {
+ err = -ENOMEM;
+ goto out_unmap_dist;
+ }
+
+ for (i = 0; i < nr_redist_regions; i++) {
+ rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
+ if (IS_ERR(rdist_regs[i].redist_base)) {
+ pr_err("%pOF: couldn't map region %d\n", node, i);
+ err = -ENODEV;
+ goto out_unmap_rdist;
+ }
+ rdist_regs[i].phys_base = res.start;
+ }
+
+ if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
+ redist_stride = 0;
+
+ gic_enable_of_quirks(node, gic_quirks, &gic_data);
+
+ err = gic_init_bases(dist_phys_base, dist_base, rdist_regs,
+ nr_redist_regions, redist_stride, &node->fwnode);
+ if (err)
+ goto out_unmap_rdist;
+
+ gic_populate_ppi_partitions(node);
+
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_of_setup_kvm_info(node);
+ return 0;
+
+out_unmap_rdist:
+ for (i = 0; i < nr_redist_regions; i++)
+ if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
+ iounmap(rdist_regs[i].redist_base);
+ kfree(rdist_regs);
+out_unmap_dist:
+ iounmap(dist_base);
+ return err;
+}
+
+IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
+
+#ifdef CONFIG_ACPI
+static struct
+{
+ void __iomem *dist_base;
+ struct redist_region *redist_regs;
+ u32 nr_redist_regions;
+ bool single_redist;
+ int enabled_rdists;
+ u32 maint_irq;
+ int maint_irq_mode;
+ phys_addr_t vcpu_base;
+} acpi_data __initdata;
+
+static void __init
+gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
+{
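+ /* Static so that successive calls fill consecutive region slots */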
+ static int count = 0;
+
+ acpi_data.redist_regs[count].phys_base = phys_base;
+ acpi_data.redist_regs[count].redist_base = redist_base;
+ acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
+ count++;
+}
+
+static int __init
+gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_redistributor *redist =
+ (struct acpi_madt_generic_redistributor *)header;
+ void __iomem *redist_base;
+
+ redist_base = ioremap(redist->base_address, redist->length);
+ if (!redist_base) {
+ pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
+ return -ENOMEM;
+ }
+ gic_request_region(redist->base_address, redist->length, "GICR");
+
+ gic_acpi_register_redist(redist->base_address, redist_base);
+ return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
+ u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
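+ /* GICv4 redistributors expose four 64K frames, GICv3 only two */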
+ u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
+ void __iomem *redist_base;
+
+ /* A GICC entry without ACPI_MADT_ENABLED set is not usable, so skip it */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
+ redist_base = ioremap(gicc->gicr_base_address, size);
+ if (!redist_base)
+ return -ENOMEM;
+ gic_request_region(gicc->gicr_base_address, size, "GICR");
+
+ gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
+ return 0;
+}
+
+static int __init gic_acpi_collect_gicr_base(void)
+{
+ acpi_tbl_entry_handler redist_parser;
+ enum acpi_madt_type type;
+
+ if (acpi_data.single_redist) {
+ type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
+ redist_parser = gic_acpi_parse_madt_gicc;
+ } else {
+ type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
+ redist_parser = gic_acpi_parse_madt_redist;
+ }
+
+ /* Collect redistributor base addresses in GICR entries */
+ if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
+ return 0;
+
+ pr_info("No valid GICR entries exist\n");
+ return -ENODEV;
+}
+
+static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ /* Subtable presence means that redist exists, that's it */
+ return 0;
+}
+
+static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
+
+ /*
+ * If the GICC entry is enabled and carries a valid GICR base address,
+ * the GICR base is provided through the GICC entry.
+ */
+ if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
+ acpi_data.enabled_rdists++;
+ return 0;
+ }
+
+ /*
+ * Firmware may legitimately pass a disabled GICC entry; don't treat
+ * this as an error, skip the entry instead of failing the probe.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
+ return -ENODEV;
+}
+
+static int __init gic_acpi_count_gicr_regions(void)
+{
+ int count;
+
+ /*
+ * Count how many redistributor regions we have. Mixing redistributor
+ * descriptions is not allowed: GICR and GICC subtables are mutually
+ * exclusive.
+ */
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
+ gic_acpi_match_gicr, 0);
+ if (count > 0) {
+ acpi_data.single_redist = false;
+ return count;
+ }
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ gic_acpi_match_gicc, 0);
+ if (count > 0) {
+ acpi_data.single_redist = true;
+ count = acpi_data.enabled_rdists;
+ }
+
+ return count;
+}
+
+static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
+ struct acpi_probe_entry *ape)
+{
+ struct acpi_madt_generic_distributor *dist;
+ int count;
+
+ dist = (struct acpi_madt_generic_distributor *)header;
+ if (dist->version != ape->driver_data)
+ return false;
+
+ /* We need to do that exercise anyway, the sooner the better */
+ count = gic_acpi_count_gicr_regions();
+ if (count <= 0)
+ return false;
+
+ acpi_data.nr_redist_regions = count;
+ return true;
+}
+
+static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
+ int maint_irq_mode;
+ static int first_madt = true;
+
+ /* Skip unusable CPUs */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
+ maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
+ ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
+
+ if (first_madt) {
+ first_madt = false;
+
+ acpi_data.maint_irq = gicc->vgic_interrupt;
+ acpi_data.maint_irq_mode = maint_irq_mode;
+ acpi_data.vcpu_base = gicc->gicv_base_address;
+
+ return 0;
+ }
+
+ /*
+ * The maintenance interrupt and GICV should be the same for every CPU
+ */
+ if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
+ (acpi_data.maint_irq_mode != maint_irq_mode) ||
+ (acpi_data.vcpu_base != gicc->gicv_base_address))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool __init gic_acpi_collect_virt_info(void)
+{
+ int count;
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ gic_acpi_parse_virt_madt_gicc, 0);
+
+ return (count > 0);
+}
+
+#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
+#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
+#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
+
+static void __init gic_acpi_setup_kvm_info(void)
+{
+ int irq;
+
+ if (!gic_acpi_collect_virt_info()) {
+ pr_warn("Unable to get hardware information used for virtualization\n");
+ return;
+ }
+
+ gic_v3_kvm_info.type = GIC_V3;
+
+ irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
+ acpi_data.maint_irq_mode,
+ ACPI_ACTIVE_HIGH);
+ if (irq <= 0)
+ return;
+
+ gic_v3_kvm_info.maint_irq = irq;
+
+ if (acpi_data.vcpu_base) {
+ struct resource *vcpu = &gic_v3_kvm_info.vcpu;
+
+ vcpu->flags = IORESOURCE_MEM;
+ vcpu->start = acpi_data.vcpu_base;
+ vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
+ }
+
+ gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+ gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
+ vgic_set_kvm_info(&gic_v3_kvm_info);
+}
+
+static struct fwnode_handle *gsi_domain_handle;
+
+static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
+{
+ return gsi_domain_handle;
+}
+
+static int __init
+gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
+{
+ struct acpi_madt_generic_distributor *dist;
+ size_t size;
+ int i, err;
+
+ /* Get distributor base address */
+ dist = (struct acpi_madt_generic_distributor *)header;
+ acpi_data.dist_base = ioremap(dist->base_address,
+ ACPI_GICV3_DIST_MEM_SIZE);
+ if (!acpi_data.dist_base) {
+ pr_err("Unable to map GICD registers\n");
+ return -ENOMEM;
+ }
+ gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
+
+ err = gic_validate_dist_version(acpi_data.dist_base);
+ if (err) {
+ pr_err("No distributor detected at @%p, giving up\n",
+ acpi_data.dist_base);
+ goto out_dist_unmap;
+ }
+
+ size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
+ acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
+ if (!acpi_data.redist_regs) {
+ err = -ENOMEM;
+ goto out_dist_unmap;
+ }
+
+ err = gic_acpi_collect_gicr_base();
+ if (err)
+ goto out_redist_unmap;
+
+ gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
+ if (!gsi_domain_handle) {
+ err = -ENOMEM;
+ goto out_redist_unmap;
+ }
+
+ err = gic_init_bases(dist->base_address, acpi_data.dist_base,
+ acpi_data.redist_regs, acpi_data.nr_redist_regions,
+ 0, gsi_domain_handle);
+ if (err)
+ goto out_fwhandle_free;
+
+ acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
+
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_acpi_setup_kvm_info();
+
+ return 0;
+
+out_fwhandle_free:
+ irq_domain_free_fwnode(gsi_domain_handle);
+out_redist_unmap:
+ for (i = 0; i < acpi_data.nr_redist_regions; i++)
+ if (acpi_data.redist_regs[i].redist_base)
+ iounmap(acpi_data.redist_regs[i].redist_base);
+ kfree(acpi_data.redist_regs);
+out_dist_unmap:
+ iounmap(acpi_data.dist_base);
+ return err;
+}
+IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
+ gic_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
+ gic_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
+ gic_acpi_init);
+#endif
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
new file mode 100644
index 000000000..a6277dea4
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/sched.h>
+
+#include <linux/irqchip/arm-gic-v4.h>
+
+/*
+ * WARNING: The blurb below assumes that you understand the
+ * intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets
+ * translated into GICv4 commands. So it effectively targets at most
+ * two individuals. You know who you are.
+ *
+ * The core GICv4 code is designed to *avoid* exposing too much of the
+ * core GIC code (that would in turn leak into the hypervisor code),
+ * and instead provide a hypervisor agnostic interface to the HW (of
+ * course, the astute reader will quickly realize that hypervisor
+ * agnostic actually means KVM-specific - what were you thinking?).
+ *
+ * In order to achieve a modicum of isolation, we try to hide most of
+ * the GICv4 "stuff" behind normal irqchip operations:
+ *
+ * - Any guest-visible VLPI is backed by a Linux interrupt (and a
+ * physical LPI which gets unmapped when the guest maps the
+ * VLPI). This allows the same DevID/EventID pair to be either
+ * mapped to the LPI (host) or the VLPI (guest). Note that this is
+ * exclusive, and you cannot have both.
+ *
+ * - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
+ *
+ * - Guest INT/CLEAR commands are implemented through
+ * irq_set_irqchip_state().
+ *
+ * - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
+ * issuing an INV after changing a priority) gets shoved into the
+ * irq_set_vcpu_affinity() method. While this is quite horrible
+ * (let's face it, this is the irqchip version of an ioctl), it
+ * confines the crap to a single location. And map/unmap really is
+ * about setting the affinity of a VLPI to a vcpu, so only INV is
+ * majorly out of place. So there.
+ *
+ * A number of commands are simply not provided by this interface, as
+ * they do not make direct sense. For example, MAPD is purely local to
+ * the virtual ITS (because it references a virtual device, and the
+ * physical ITS is still very much in charge of the physical
+ * device). Same goes for things like MAPC (the physical ITS deals
+ * with the actual vPE affinity, and not the braindead concept of
+ * collection). SYNC is not provided either, as each and every command
+ * is followed by a VSYNC. This could be relaxed in the future, should
+ * this be seen as a bottleneck (yes, this means *never*).
+ *
+ * But handling VLPIs is only one side of the job of the GICv4
+ * code. The other (darker) side is to take care of the doorbell
+ * interrupts which are delivered when a VLPI targeting a non-running
+ * vcpu is being made pending.
+ *
+ * The choice made here is that each vcpu (VPE in old northern GICv4
+ * dialect) gets a single doorbell LPI, no matter how many interrupts
+ * are targeting it. This has a nice property, which is that the
+ * interrupt becomes a handle for the VPE, and that the hypervisor
+ * code can manipulate it through the normal interrupt API:
+ *
+ * - VMs (or rather the VM abstraction that matters to the GIC)
+ * contain an irq domain where each interrupt maps to a VPE. In
+ * turn, this domain sits on top of the normal LPI allocator, and a
+ * specially crafted irq_chip implementation.
+ *
+ * - mask/unmask do what is expected on the doorbell interrupt.
+ *
+ * - irq_set_affinity is used to move a VPE from one redistributor to
+ * another.
+ *
+ * - irq_set_vcpu_affinity once again gets hijacked for the purpose of
+ * creating a new sub-API, namely scheduling/descheduling a VPE
+ * (which involves programming GICR_V{PROP,PEND}BASER) and
+ * performing INVALL operations.
+ */
+
+static struct irq_domain *gic_domain;
+static const struct irq_domain_ops *vpe_domain_ops;
+static const struct irq_domain_ops *sgi_domain_ops;
+
+#ifdef CONFIG_ARM64
+#include <asm/cpufeature.h>
+
+bool gic_cpuif_has_vsgi(void)
+{
+ unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);
+
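+ /* A GIC field of 0b0011 or above indicates GICv4.1 (vSGI) support */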
+ return fld >= 0x3;
+}
+#else
+bool gic_cpuif_has_vsgi(void)
+{
+ return false;
+}
+#endif
+
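+/*
+ * The ITS is expected to provide SGI domain ops only when v4.1 is
+ * supported, so their presence doubles as the v4.1 marker.
+ */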
+static bool has_v4_1(void)
+{
+ return !!sgi_domain_ops;
+}
+
+static bool has_v4_1_sgi(void)
+{
+ return has_v4_1() && gic_cpuif_has_vsgi();
+}
+
+static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
+{
+ char *name;
+ int sgi_base;
+
+ if (!has_v4_1_sgi())
+ return 0;
+
+ name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
+ if (!name)
+ goto err;
+
+ vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
+ if (!vpe->fwnode)
+ goto err;
+
+ kfree(name);
+ name = NULL;
+
+ vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
+ sgi_domain_ops, vpe);
+ if (!vpe->sgi_domain)
+ goto err;
+
+ sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
+ NUMA_NO_NODE, vpe,
+ false, NULL);
+ if (sgi_base <= 0)
+ goto err;
+
+ return 0;
+
+err:
+ if (vpe->sgi_domain)
+ irq_domain_remove(vpe->sgi_domain);
+ if (vpe->fwnode)
+ irq_domain_free_fwnode(vpe->fwnode);
+ kfree(name);
+ return -ENOMEM;
+}
+
+int its_alloc_vcpu_irqs(struct its_vm *vm)
+{
+ int vpe_base_irq, i;
+
+ vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
+ task_pid_nr(current));
+ if (!vm->fwnode)
+ goto err;
+
+ vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
+ vm->fwnode, vpe_domain_ops,
+ vm);
+ if (!vm->domain)
+ goto err;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ vm->vpes[i]->its_vm = vm;
+ vm->vpes[i]->idai = true;
+ }
+
+ vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
+ NUMA_NO_NODE, vm,
+ false, NULL);
+ if (vpe_base_irq <= 0)
+ goto err;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ int ret;
+ vm->vpes[i]->irq = vpe_base_irq + i;
+ ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (vm->domain)
+ irq_domain_remove(vm->domain);
+ if (vm->fwnode)
+ irq_domain_free_fwnode(vm->fwnode);
+
+ return -ENOMEM;
+}
+
+static void its_free_sgi_irqs(struct its_vm *vm)
+{
+ int i;
+
+ if (!has_v4_1_sgi())
+ return;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
+
+ if (WARN_ON(!irq))
+ continue;
+
+ irq_domain_free_irqs(irq, 16);
+ irq_domain_remove(vm->vpes[i]->sgi_domain);
+ irq_domain_free_fwnode(vm->vpes[i]->fwnode);
+ }
+}
+
+void its_free_vcpu_irqs(struct its_vm *vm)
+{
+ its_free_sgi_irqs(vm);
+ irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
+ irq_domain_remove(vm->domain);
+ irq_domain_free_fwnode(vm->fwnode);
+}
+
+static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
+{
+ return irq_set_vcpu_affinity(vpe->irq, info);
+}
+
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
+{
+ struct irq_desc *desc = irq_to_desc(vpe->irq);
+ struct its_cmd_info info = { };
+ int ret;
+
+ WARN_ON(preemptible());
+
+ info.cmd_type = DESCHEDULE_VPE;
+ if (has_v4_1()) {
+ /* GICv4.1 can directly deal with doorbells */
+ info.req_db = db;
+ } else {
+ /* Undo the nested disable_irq() calls... */
+ while (db && irqd_irq_disabled(&desc->irq_data))
+ enable_irq(vpe->irq);
+ }
+
+ ret = its_send_vpe_cmd(vpe, &info);
+ if (!ret)
+ vpe->resident = false;
+
+ vpe->ready = false;
+
+ return ret;
+}
+
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
+{
+ struct its_cmd_info info = { };
+ int ret;
+
+ WARN_ON(preemptible());
+
+ info.cmd_type = SCHEDULE_VPE;
+ if (has_v4_1()) {
+ info.g0en = g0en;
+ info.g1en = g1en;
+ } else {
+ /* Disable the doorbell, as we're about to enter the guest */
+ disable_irq_nosync(vpe->irq);
+ }
+
+ ret = its_send_vpe_cmd(vpe, &info);
+ if (!ret)
+ vpe->resident = true;
+
+ return ret;
+}
+
+int its_commit_vpe(struct its_vpe *vpe)
+{
+ struct its_cmd_info info = {
+ .cmd_type = COMMIT_VPE,
+ };
+ int ret;
+
+ WARN_ON(preemptible());
+
+ ret = its_send_vpe_cmd(vpe, &info);
+ if (!ret)
+ vpe->ready = true;
+
+ return ret;
+}
+
+
+int its_invall_vpe(struct its_vpe *vpe)
+{
+ struct its_cmd_info info = {
+ .cmd_type = INVALL_VPE,
+ };
+
+ return its_send_vpe_cmd(vpe, &info);
+}
+
+int its_map_vlpi(int irq, struct its_vlpi_map *map)
+{
+ struct its_cmd_info info = {
+ .cmd_type = MAP_VLPI,
+ {
+ .map = map,
+ },
+ };
+ int ret;
+
+ /*
+ * The host will never see that interrupt firing again, so it
+ * is vital that we don't do any lazy masking.
+ */
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+ ret = irq_set_vcpu_affinity(irq, &info);
+ if (ret)
+ irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+ return ret;
+}
+
+int its_get_vlpi(int irq, struct its_vlpi_map *map)
+{
+ struct its_cmd_info info = {
+ .cmd_type = GET_VLPI,
+ {
+ .map = map,
+ },
+ };
+
+ return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_unmap_vlpi(int irq)
+{
+ irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+ return irq_set_vcpu_affinity(irq, NULL);
+}
+
+int its_prop_update_vlpi(int irq, u8 config, bool inv)
+{
+ struct its_cmd_info info = {
+ .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
+ {
+ .config = config,
+ },
+ };
+
+ return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_prop_update_vsgi(int irq, u8 priority, bool group)
+{
+ struct its_cmd_info info = {
+ .cmd_type = PROP_UPDATE_VSGI,
+ {
+ .priority = priority,
+ .group = group,
+ },
+ };
+
+ return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_init_v4(struct irq_domain *domain,
+ const struct irq_domain_ops *vpe_ops,
+ const struct irq_domain_ops *sgi_ops)
+{
+ if (domain) {
+ pr_info("ITS: Enabling GICv4 support\n");
+ gic_domain = domain;
+ vpe_domain_ops = vpe_ops;
+ sgi_domain_ops = sgi_ops;
+ return 0;
+ }
+
+ pr_err("ITS: No GICv4 VPE domain allocated\n");
+ return -ENODEV;
+}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
new file mode 100644
index 000000000..4c7bae0ec
--- /dev/null
+++ b/drivers/irqchip/irq-gic.c
@@ -0,0 +1,1764 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * Interrupt architecture for the GIC:
+ *
+ * o There is one Interrupt Distributor, which receives interrupts
+ * from system devices and sends them to the Interrupt Controllers.
+ *
+ * o There is one CPU Interface per CPU, which delivers interrupts sent
+ * by the Distributor, and interrupts generated locally, to the
+ * associated CPU. The base address of the CPU interface is usually
+ * aliased so that the same address points to different chips depending
+ * on the CPU it is accessed from.
+ *
+ * Note that IRQs 0-31 are special - they are local to each CPU.
+ * As such, the enable set/clear, pending set/clear and active bit
+ * registers are banked per-cpu for these sources.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/acpi.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/cputype.h>
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/virt.h>
+
+#include "irq-gic-common.h"
+
+#ifdef CONFIG_ARM64
+#include <asm/cpufeature.h>
+
+static void gic_check_cpu_features(void)
+{
+ WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
+ TAINT_CPU_OUT_OF_SPEC,
+ "GICv3 system registers enabled, broken firmware!\n");
+}
+#else
+#define gic_check_cpu_features() do { } while(0)
+#endif
+
+union gic_base {
+ void __iomem *common_base;
+ void __percpu * __iomem *percpu_base;
+};
+
+struct gic_chip_data {
+ union gic_base dist_base;
+ union gic_base cpu_base;
+ void __iomem *raw_dist_base;
+ void __iomem *raw_cpu_base;
+ u32 percpu_offset;
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
+ u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+ u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
+ u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+ u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+ u32 __percpu *saved_ppi_enable;
+ u32 __percpu *saved_ppi_active;
+ u32 __percpu *saved_ppi_conf;
+#endif
+ struct irq_domain *domain;
+ unsigned int gic_irqs;
+};
+
+#ifdef CONFIG_BL_SWITCHER
+
+static DEFINE_RAW_SPINLOCK(cpu_map_lock);
+
+#define gic_lock_irqsave(f) \
+ raw_spin_lock_irqsave(&cpu_map_lock, (f))
+#define gic_unlock_irqrestore(f) \
+ raw_spin_unlock_irqrestore(&cpu_map_lock, (f))
+
+#define gic_lock() raw_spin_lock(&cpu_map_lock)
+#define gic_unlock() raw_spin_unlock(&cpu_map_lock)
+
+#else
+
+#define gic_lock_irqsave(f) do { (void)(f); } while(0)
+#define gic_unlock_irqrestore(f) do { (void)(f); } while(0)
+
+#define gic_lock() do { } while(0)
+#define gic_unlock() do { } while(0)
+
+#endif
+
+static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);
+
+/*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering. Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_GIC_CPU_IF 8
+static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+
+static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+
+static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;
+
+static struct gic_kvm_info gic_v2_kvm_info __initdata;
+
+static DEFINE_PER_CPU(u32, sgi_intid);
+
+#ifdef CONFIG_GIC_NON_BANKED
+static DEFINE_STATIC_KEY_FALSE(frankengic_key);
+
+static void enable_frankengic(void)
+{
+ static_branch_enable(&frankengic_key);
+}
+
+static inline void __iomem *__get_base(union gic_base *base)
+{
+ if (static_branch_unlikely(&frankengic_key))
+ return raw_cpu_read(*base->percpu_base);
+
+ return base->common_base;
+}
+
+#define gic_data_dist_base(d) __get_base(&(d)->dist_base)
+#define gic_data_cpu_base(d) __get_base(&(d)->cpu_base)
+#else
+#define gic_data_dist_base(d) ((d)->dist_base.common_base)
+#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
+#define enable_frankengic() do { } while(0)
+#endif
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+ return gic_data_dist_base(gic_data);
+}
+
+static inline void __iomem *gic_cpu_base(struct irq_data *d)
+{
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+ return gic_data_cpu_base(gic_data);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
+static inline bool cascading_gic_irq(struct irq_data *d)
+{
+ void *data = irq_data_get_irq_handler_data(d);
+
+ /*
+ * If handler_data is set, this is a cascading interrupt, and
+ * it cannot possibly be forwarded.
+ */
+ return data != NULL;
+}
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ */
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
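+ /* One bit per interrupt, 32 interrupts per distributor register */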
+ u32 mask = 1 << (gic_irq(d) % 32);
+ writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
+}
+
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+ u32 mask = 1 << (gic_irq(d) % 32);
+ return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
+}
+
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+ gic_mask_irq(d);
+ /*
+ * When masking a forwarded interrupt, make sure it is
+ * deactivated as well.
+ *
+ * This ensures that an interrupt that is getting
+ * disabled/masked will not get "stuck", because there is
+ * no one to deactivate it (the guest is being terminated).
+ */
+ if (irqd_is_forwarded_to_vcpu(d))
+ gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GIC_DIST_ENABLE_SET);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+ u32 hwirq = gic_irq(d);
+
+ if (hwirq < 16)
+ hwirq = this_cpu_read(sgi_intid);
+
+ writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI);
+}
+
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+ u32 hwirq = gic_irq(d);
+
+ /* Do not deactivate an IRQ forwarded to a vcpu. */
+ if (irqd_is_forwarded_to_vcpu(d))
+ return;
+
+ if (hwirq < 16)
+ hwirq = this_cpu_read(sgi_intid);
+
+ writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
+}
+
+static int gic_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool val)
+{
+ u32 reg;
+
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+ reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
+ break;
+
+ case IRQCHIP_STATE_ACTIVE:
+ reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
+ break;
+
+ case IRQCHIP_STATE_MASKED:
+ reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ gic_poke_irq(d, reg);
+ return 0;
+}
+
+static int gic_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool *val)
+{
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+ *val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
+ break;
+
+ case IRQCHIP_STATE_ACTIVE:
+ *val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
+ break;
+
+ case IRQCHIP_STATE_MASKED:
+ *val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+ void __iomem *base = gic_dist_base(d);
+ unsigned int gicirq = gic_irq(d);
+ int ret;
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (gicirq < 16)
+ return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
+
+ /* SPIs have restrictions on the supported types */
+ if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
+ type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
+ if (ret && gicirq < 32) {
+ /* Misconfigured PPIs are usually not fatal */
+ pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+ /* Only interrupts on the primary GIC can be forwarded to a vcpu. */
+ if (cascading_gic_irq(d) || gic_irq(d) < 16)
+ return -EINVAL;
+
+ if (vcpu)
+ irqd_set_forwarded_to_vcpu(d);
+ else
+ irqd_clr_forwarded_to_vcpu(d);
+ return 0;
+}
+
+static int gic_retrigger(struct irq_data *data)
+{
+ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
+}
+
+static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+ u32 irqstat, irqnr;
+ struct gic_chip_data *gic = &gic_data[0];
+ void __iomem *cpu_base = gic_data_cpu_base(gic);
+
+ do {
+ irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+ irqnr = irqstat & GICC_IAR_INT_ID_MASK;
+
+ if (unlikely(irqnr >= 1020))
+ break;
+
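+ /* With split EOI/deactivate, this only drops the priority; deactivation comes later */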
+ if (static_branch_likely(&supports_deactivate_key))
+ writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+ isb();
+
+ /*
+ * Ensure any shared data written by the CPU sending the IPI
+ * is read after we've read the ACK register on the GIC.
+ *
+ * Pairs with the write barrier in gic_ipi_send_mask
+ */
+ if (irqnr <= 15) {
+ smp_rmb();
+
+ /*
+ * The GIC encodes the source CPU in GICC_IAR,
+ * causing the deactivation to fail if the value is not
+ * written back as is to GICC_EOI. Stash the INTID
+ * away for gic_eoi_irq() to write back. This only
+ * works because we don't nest SGIs...
+ */
+ this_cpu_write(sgi_intid, irqstat);
+ }
+
+ generic_handle_domain_irq(gic->domain, irqnr);
+ } while (1);
+}
+
+static void gic_handle_cascade_irq(struct irq_desc *desc)
+{
+ struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int gic_irq;
+ unsigned long status;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
+
+ gic_irq = (status & GICC_IAR_INT_ID_MASK);
+ if (gic_irq == GICC_INT_SPURIOUS)
+ goto out;
+
+ isb();
+ ret = generic_handle_domain_irq(chip_data->domain, gic_irq);
+ if (unlikely(ret))
+ handle_bad_irq(desc);
+ out:
+ chained_irq_exit(chip, desc);
+}
+
+static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
+
+ if (gic->domain->dev)
+ seq_printf(p, "%s", gic->domain->dev->of_node->name);
+ else
+ seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0]));
+}
+
+void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
+{
+ BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+ irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
+ &gic_data[gic_nr]);
+}
+
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+ void __iomem *base = gic_data_dist_base(gic);
+ u32 mask, i;
+
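+ /*
+ * The target registers of the first 32 (banked) interrupts are
+ * read-only and return the interface mask of the reading CPU.
+ */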
+ for (i = mask = 0; i < 32; i += 4) {
+ mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+ mask |= mask >> 16;
+ mask |= mask >> 8;
+ if (mask)
+ break;
+ }
+
+ if (!mask && num_possible_cpus() > 1)
+ pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+ return mask;
+}
+
+static bool gic_check_gicv2(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + GIC_CPU_IDENT);
+ return (val & 0xff0fff) == 0x02043B;
+}
+
+static void gic_cpu_if_up(struct gic_chip_data *gic)
+{
+ void __iomem *cpu_base = gic_data_cpu_base(gic);
+ u32 bypass = 0;
+ u32 mode = 0;
+ int i;
+
+ if (gic == &gic_data[0] && static_branch_likely(&supports_deactivate_key))
+ mode = GIC_CPU_CTRL_EOImodeNS;
+
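+ /* On a real GICv2, clear the active priority registers (GICC_APRn) */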
+ if (gic_check_gicv2(cpu_base))
+ for (i = 0; i < 4; i++)
+ writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4);
+
+ /*
+ * Preserve bypass disable bits to be written back later
+ */
+ bypass = readl(cpu_base + GIC_CPU_CTRL);
+ bypass &= GICC_DIS_BYPASS_MASK;
+
+ writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
+}
+
+
+static void gic_dist_init(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ u32 cpumask;
+ unsigned int gic_irqs = gic->gic_irqs;
+ void __iomem *base = gic_data_dist_base(gic);
+
+ writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);
+
+ /*
+ * Set all global interrupts to this CPU only.
+ */
+ cpumask = gic_get_cpumask(gic);
+ cpumask |= cpumask << 8;
+ cpumask |= cpumask << 16;
+ for (i = 32; i < gic_irqs; i += 4)
+ writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
+
+ gic_dist_config(base, gic_irqs, NULL);
+
+ writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
+}
+
+static int gic_cpu_init(struct gic_chip_data *gic)
+{
+ void __iomem *dist_base = gic_data_dist_base(gic);
+ void __iomem *base = gic_data_cpu_base(gic);
+ unsigned int cpu_mask, cpu = smp_processor_id();
+ int i;
+
+ /*
+ * Setting up the CPU map is only relevant for the primary GIC
+ * because any nested/secondary GICs do not directly interface
+ * with the CPU(s).
+ */
+ if (gic == &gic_data[0]) {
+ /*
+ * Get what the GIC says our CPU mask is.
+ */
+ if (WARN_ON(cpu >= NR_GIC_CPU_IF))
+ return -EINVAL;
+
+ gic_check_cpu_features();
+ cpu_mask = gic_get_cpumask(gic);
+ gic_cpu_map[cpu] = cpu_mask;
+
+ /*
+ * Clear our mask from the other map entries in case they're
+ * still undefined.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ if (i != cpu)
+ gic_cpu_map[i] &= ~cpu_mask;
+ }
+
+ gic_cpu_config(dist_base, 32, NULL);
+
+ writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
+ gic_cpu_if_up(gic);
+
+ return 0;
+}
+
+int gic_cpu_if_down(unsigned int gic_nr)
+{
+ void __iomem *cpu_base;
+ u32 val = 0;
+
+ if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
+ return -EINVAL;
+
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+ val = readl(cpu_base + GIC_CPU_CTRL);
+ val &= ~GICC_ENABLE;
+ writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
+
+ return 0;
+}
+
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
+/*
+ * Saves the GIC distributor registers during suspend or idle. Must be called
+ * with interrupts disabled but before powering down the GIC. After calling
+ * this function, no interrupts will be delivered by the GIC, and another
+ * platform-specific wakeup source must be enabled.
+ */
+void gic_dist_save(struct gic_chip_data *gic)
+{
+ unsigned int gic_irqs;
+ void __iomem *dist_base;
+ int i;
+
+ if (WARN_ON(!gic))
+ return;
+
+ gic_irqs = gic->gic_irqs;
+ dist_base = gic_data_dist_base(gic);
+
+ if (!dist_base)
+ return;
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ gic->saved_spi_conf[i] =
+ readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ gic->saved_spi_target[i] =
+ readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ gic->saved_spi_enable[i] =
+ readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ gic->saved_spi_active[i] =
+ readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle. Must be called before enabling interrupts. If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+void gic_dist_restore(struct gic_chip_data *gic)
+{
+ unsigned int gic_irqs;
+ unsigned int i;
+ void __iomem *dist_base;
+
+ if (WARN_ON(!gic))
+ return;
+
+ gic_irqs = gic->gic_irqs;
+ dist_base = gic_data_dist_base(gic);
+
+ if (!dist_base)
+ return;
+
+ writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ writel_relaxed(gic->saved_spi_conf[i],
+ dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(GICD_INT_DEF_PRI_X4,
+ dist_base + GIC_DIST_PRI + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(gic->saved_spi_target[i],
+ dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+ writel_relaxed(GICD_INT_EN_CLR_X32,
+ dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
+ writel_relaxed(gic->saved_spi_enable[i],
+ dist_base + GIC_DIST_ENABLE_SET + i * 4);
+ }
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+ writel_relaxed(GICD_INT_EN_CLR_X32,
+ dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+ writel_relaxed(gic->saved_spi_active[i],
+ dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+ }
+
+ writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
+}
+
+void gic_cpu_save(struct gic_chip_data *gic)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (WARN_ON(!gic))
+ return;
+
+ dist_base = gic_data_dist_base(gic);
+ cpu_base = gic_data_cpu_base(gic);
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = raw_cpu_ptr(gic->saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = raw_cpu_ptr(gic->saved_ppi_active);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+
+ ptr = raw_cpu_ptr(gic->saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+}
+
+void gic_cpu_restore(struct gic_chip_data *gic)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (WARN_ON(!gic))
+ return;
+
+ dist_base = gic_data_dist_base(gic);
+ cpu_base = gic_data_cpu_base(gic);
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = raw_cpu_ptr(gic->saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
+ writel_relaxed(GICD_INT_EN_CLR_X32,
+ dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+ }
+
+ ptr = raw_cpu_ptr(gic->saved_ppi_active);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
+ writel_relaxed(GICD_INT_EN_CLR_X32,
+ dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+ }
+
+ ptr = raw_cpu_ptr(gic->saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+ writel_relaxed(GICD_INT_DEF_PRI_X4,
+ dist_base + GIC_DIST_PRI + i * 4);
+
+ writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
+ gic_cpu_if_up(gic);
+}
+
+static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+ int i;
+
+ for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ gic_cpu_save(&gic_data[i]);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ gic_cpu_restore(&gic_data[i]);
+ break;
+ case CPU_CLUSTER_PM_ENTER:
+ gic_dist_save(&gic_data[i]);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ gic_dist_restore(&gic_data[i]);
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gic_notifier_block = {
+ .notifier_call = gic_notifier,
+};
+
+static int gic_pm_init(struct gic_chip_data *gic)
+{
+ gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+ sizeof(u32));
+ if (WARN_ON(!gic->saved_ppi_enable))
+ return -ENOMEM;
+
+ gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+ sizeof(u32));
+ if (WARN_ON(!gic->saved_ppi_active))
+ goto free_ppi_enable;
+
+ gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
+ sizeof(u32));
+ if (WARN_ON(!gic->saved_ppi_conf))
+ goto free_ppi_active;
+
+ if (gic == &gic_data[0])
+ cpu_pm_register_notifier(&gic_notifier_block);
+
+ return 0;
+
+free_ppi_active:
+ free_percpu(gic->saved_ppi_active);
+free_ppi_enable:
+ free_percpu(gic->saved_ppi_enable);
+
+ return -ENOMEM;
+}
+#else
+static int gic_pm_init(struct gic_chip_data *gic)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
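+/*
+ * Used when the distributor is flagged as needing read-modify-write
+ * accesses (needs_rmw_access): emulate the byte write to
+ * GIC_DIST_TARGET with a locked read-modify-write of the whole word.
+ */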
+static void rmw_writeb(u8 bval, void __iomem *addr)
+{
+ static DEFINE_RAW_SPINLOCK(rmw_lock);
+ unsigned long offset = (unsigned long)addr & 3UL;
+ unsigned long shift = offset * 8;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&rmw_lock, flags);
+
+ addr -= offset;
+ val = readl_relaxed(addr);
+ val &= ~GENMASK(shift + 7, shift);
+ val |= bval << shift;
+ writel_relaxed(val, addr);
+
+ raw_spin_unlock_irqrestore(&rmw_lock, flags);
+}
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
+ unsigned int cpu;
+
+ if (unlikely(gic != &gic_data[0]))
+ return -EINVAL;
+
+ if (!force)
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask_val);
+
+ if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (static_branch_unlikely(&needs_rmw_access))
+ rmw_writeb(gic_cpu_map[cpu], reg);
+ else
+ writeb_relaxed(gic_cpu_map[cpu], reg);
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
+{
+ int cpu;
+ unsigned long flags, map = 0;
+
+ if (unlikely(nr_cpu_ids == 1)) {
+ /* Only one CPU? let's do a self-IPI... */
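+ /* TargetListFilter (bits [25:24]) = 0b10: deliver only to this CPU */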
+ writel_relaxed(2 << 24 | d->hwirq,
+ gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+ return;
+ }
+
+ gic_lock_irqsave(flags);
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= gic_cpu_map[cpu];
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before they observe us issuing the IPI.
+ */
+ dmb(ishst);
+
+ /* this always happens on GIC0 */
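+ /* CPUTargetList lives in bits [23:16] of GIC_DIST_SOFTINT */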
+ writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+ gic_unlock_irqrestore(flags);
+}
+
+static int gic_starting_cpu(unsigned int cpu)
+{
+ gic_cpu_init(&gic_data[0]);
+ return 0;
+}
+
+static __init void gic_smp_init(void)
+{
+ struct irq_fwspec sgi_fwspec = {
+ .fwnode = gic_data[0].domain->fwnode,
+ .param_count = 1,
+ };
+ int base_sgi;
+
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+ "irqchip/arm/gic:starting",
+ gic_starting_cpu, NULL);
+
+ base_sgi = __irq_domain_alloc_irqs(gic_data[0].domain, -1, 8,
+ NUMA_NO_NODE, &sgi_fwspec,
+ false, NULL);
+ if (WARN_ON(base_sgi <= 0))
+ return;
+
+ set_smp_ipi_range(base_sgi, 8);
+}
+#else
+#define gic_smp_init() do { } while(0)
+#define gic_set_affinity NULL
+#define gic_ipi_send_mask NULL
+#endif
+
+static const struct irq_chip gic_chip = {
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
+ .irq_set_affinity = gic_set_affinity,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_print_chip = gic_irq_print_chip,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static const struct irq_chip gic_chip_mode1 = {
+ .name = "GICv2",
+ .irq_mask = gic_eoimode1_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoimode1_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
+ .irq_set_affinity = gic_set_affinity,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_send_sgi - send a SGI directly to given CPU interface number
+ *
+ * cpu_id: the ID for the destination CPU interface
+ * irq: the IPI number to send a SGI for
+ */
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
+{
+ BUG_ON(cpu_id >= NR_GIC_CPU_IF);
+ cpu_id = 1 << cpu_id;
+ /* this always happens on GIC0 */
+ writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+ unsigned int cpu_bit;
+
+ if (cpu >= NR_GIC_CPU_IF)
+ return -1;
+ cpu_bit = gic_cpu_map[cpu];
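+ /* More than one bit set means the interface mapping is still ambiguous. */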
+ if (cpu_bit & (cpu_bit - 1))
+ return -1;
+ return __ffs(cpu_bit);
+}
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id. The CPU interface mapping
+ * is also updated. Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+ unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+ void __iomem *dist_base;
+ int i, ror_val, cpu = smp_processor_id();
+ u32 val, cur_target_mask, active_mask;
+
+ BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ if (!dist_base)
+ return;
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+
+ cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+ cur_target_mask = 0x01010101 << cur_cpu_id;
+ ror_val = (cur_cpu_id - new_cpu_id) & 31;
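+ /*
+ * Each GIC_DIST_TARGET word packs four one-byte target fields, so the
+ * per-byte CPU bit can be moved from cur_cpu_id to new_cpu_id with a
+ * single 32-bit rotate of the extracted mask.
+ */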
+
+ gic_lock();
+
+ /* Update the target interface for this logical CPU */
+ gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+ /*
+ * Find all the peripheral interrupts targeting the current
+ * CPU interface and migrate them to the new CPU interface.
+ * We skip DIST_TARGET 0 to 7 as they are read-only.
+ */
+ for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+ val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+ active_mask = val & cur_target_mask;
+ if (active_mask) {
+ val &= ~active_mask;
+ val |= ror32(active_mask, ror_val);
+ writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
+ }
+ }
+
+ gic_unlock();
+
+ /*
+ * Now let's migrate and clear any potential SGIs that might be
+ * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
+ * is a banked register, we can only forward the SGI using
+ * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
+ * doesn't use that information anyway.
+ *
+ * For the same reason we do not adjust SGI source information
+ * for previously sent SGIs by us to other CPUs either.
+ */
+ for (i = 0; i < 16; i += 4) {
+ int j;
+ val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+ if (!val)
+ continue;
+ writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+ for (j = i; j < i + 4; j++) {
+ if (val & 0xff)
+ writel_relaxed((1 << (new_cpu_id + 16)) | j,
+ dist_base + GIC_DIST_SOFTINT);
+ val >>= 8;
+ }
+ }
+}
+
+/*
+ * gic_get_sgir_physaddr - get the physical address for the SGI register
+ *
+ * Return the physical address of the SGI register to be used
+ * by some early assembly code when the kernel is not yet available.
+ */
+static unsigned long gic_dist_physaddr;
+
+unsigned long gic_get_sgir_physaddr(void)
+{
+ if (!gic_dist_physaddr)
+ return 0;
+ return gic_dist_physaddr + GIC_DIST_SOFTINT;
+}
+
+static void __init gic_init_physaddr(struct device_node *node)
+{
+ struct resource res;
+ if (of_address_to_resource(node, 0, &res) == 0) {
+ gic_dist_physaddr = res.start;
+ pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
+ }
+}
+
+#else
+#define gic_init_physaddr(node) do { } while (0)
+#endif
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct gic_chip_data *gic = d->host_data;
+ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
+ const struct irq_chip *chip;
+
+ chip = (static_branch_likely(&supports_deactivate_key) &&
+ gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip;
+
+ switch (hw) {
+ case 0 ... 31:
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+ break;
+ default:
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
+ irq_set_probe(irq);
+ irqd_set_single_target(irqd);
+ break;
+ }
+
+ /* Prevents SW retriggers which mess up the ACK/EOI ordering */
+ irqd_set_handle_enforce_irqctx(irqd);
+ return 0;
+}
+
+static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+}
+
+static int gic_irq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
+ *hwirq = fwspec->param[0];
+ *type = IRQ_TYPE_EDGE_RISING;
+ return 0;
+ }
+
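+ /*
+ * Standard three-cell DT specifier: <type number flags>, where type 0
+ * is an SPI (hwirq = number + 32) and type 1 a PPI (hwirq = number + 16),
+ * e.g. interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH> for SPI 29.
+ */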
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count < 3)
+ return -EINVAL;
+
+ switch (fwspec->param[0]) {
+ case 0: /* SPI */
+ *hwirq = fwspec->param[1] + 32;
+ break;
+ case 1: /* PPI */
+ *hwirq = fwspec->param[1] + 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+ /* Make it clear that broken DTs are... broken */
+ WARN(*type == IRQ_TYPE_NONE,
+ "HW irq %ld has invalid type\n", *hwirq);
+ return 0;
+ }
+
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ if (fwspec->param[0] < 16) {
+ pr_err(FW_BUG "Illegal GSI%d translation request\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+
+ WARN(*type == IRQ_TYPE_NONE,
+ "HW irq %ld has invalid type\n", *hwirq);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = arg;
+
+ ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
+ .translate = gic_irq_domain_translate,
+ .alloc = gic_irq_domain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+ .map = gic_irq_domain_map,
+ .unmap = gic_irq_domain_unmap,
+};
+
+static int gic_init_bases(struct gic_chip_data *gic,
+ struct fwnode_handle *handle)
+{
+ int gic_irqs, ret;
+
+ if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+ /* Franken-GIC without banked registers... */
+ unsigned int cpu;
+
+ gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
+ gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
+ if (WARN_ON(!gic->dist_base.percpu_base ||
+ !gic->cpu_base.percpu_base)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for_each_possible_cpu(cpu) {
+ u32 mpidr = cpu_logical_map(cpu);
+ u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ unsigned long offset = gic->percpu_offset * core_id;
+ *per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
+ gic->raw_dist_base + offset;
+ *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
+ gic->raw_cpu_base + offset;
+ }
+
+ enable_frankengic();
+ } else {
+ /* Normal, sane GIC... */
+ WARN(gic->percpu_offset,
+ "GIC_NON_BANKED not enabled, ignoring %08x offset!",
+ gic->percpu_offset);
+ gic->dist_base.common_base = gic->raw_dist_base;
+ gic->cpu_base.common_base = gic->raw_cpu_base;
+ }
+
+ /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources.
+ */
+ gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
+ gic_irqs = (gic_irqs + 1) * 32;
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
+ gic->gic_irqs = gic_irqs;
+
+ if (handle) { /* DT/ACPI */
+ gic->domain = irq_domain_create_linear(handle, gic_irqs,
+ &gic_irq_domain_hierarchy_ops,
+ gic);
+ } else { /* Legacy support */
+ /*
+ * For primary GICs, skip over SGIs.
+ * No secondary GIC support whatsoever.
+ */
+ int irq_base;
+
+ gic_irqs -= 16; /* calculate # of irqs to allocate */
+
+ irq_base = irq_alloc_descs(16, 16, gic_irqs,
+ numa_node_id());
+ if (irq_base < 0) {
+ WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
+ irq_base = 16;
+ }
+
+ gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
+ 16, &gic_irq_domain_ops, gic);
+ }
+
+ if (WARN_ON(!gic->domain)) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ gic_dist_init(gic);
+ ret = gic_cpu_init(gic);
+ if (ret)
+ goto error;
+
+ ret = gic_pm_init(gic);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+ free_percpu(gic->dist_base.percpu_base);
+ free_percpu(gic->cpu_base.percpu_base);
+ }
+
+ return ret;
+}
+
+static int __init __gic_init_bases(struct gic_chip_data *gic,
+ struct fwnode_handle *handle)
+{
+ int i, ret;
+
+ if (WARN_ON(!gic || gic->domain))
+ return -EINVAL;
+
+ if (gic == &gic_data[0]) {
+ /*
+ * Initialize the CPU interface map to all CPUs.
+ * It will be refined as each CPU probes its ID.
+ * This is only necessary for the primary GIC.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ gic_cpu_map[i] = 0xff;
+
+ set_handle_irq(gic_handle_irq);
+ if (static_branch_likely(&supports_deactivate_key))
+ pr_info("GIC: Using split EOI/Deactivate mode\n");
+ }
+
+ ret = gic_init_bases(gic, handle);
+ if (gic == &gic_data[0])
+ gic_smp_init();
+
+ return ret;
+}
+
+void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
+{
+ struct gic_chip_data *gic;
+
+ /*
+ * Non-DT/ACPI systems won't run a hypervisor, so let's not
+ * bother with these...
+ */
+ static_branch_disable(&supports_deactivate_key);
+
+ gic = &gic_data[0];
+ gic->raw_dist_base = dist_base;
+ gic->raw_cpu_base = cpu_base;
+
+ __gic_init_bases(gic, NULL);
+}
+
+static void gic_teardown(struct gic_chip_data *gic)
+{
+ if (WARN_ON(!gic))
+ return;
+
+ if (gic->raw_dist_base)
+ iounmap(gic->raw_dist_base);
+ if (gic->raw_cpu_base)
+ iounmap(gic->raw_cpu_base);
+}
+
+#ifdef CONFIG_OF
+static int gic_cnt __initdata;
+static bool gicv2_force_probe;
+
+static int __init gicv2_force_probe_cfg(char *buf)
+{
+ return strtobool(buf, &gicv2_force_probe);
+}
+early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
+
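+/*
+ * Work out whether split EOI/Deactivate (EOImode == 1) can be used. This
+ * requires the full 8kB CPU interface view (GICC_DIR lives in the second
+ * 4kB page), so undersized or 64kB-aliased firmware mappings are detected
+ * and fixed up here where possible.
+ */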
+static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
+{
+ struct resource cpuif_res;
+
+ of_address_to_resource(node, 1, &cpuif_res);
+
+ if (!is_hyp_mode_available())
+ return false;
+ if (resource_size(&cpuif_res) < SZ_8K) {
+ void __iomem *alt;
+ /*
+ * Check for a stupid firmware that only exposes the
+ * first page of a GICv2.
+ */
+ if (!gic_check_gicv2(*base))
+ return false;
+
+ if (!gicv2_force_probe) {
+ pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
+ return false;
+ }
+
+ alt = ioremap(cpuif_res.start, SZ_8K);
+ if (!alt)
+ return false;
+ if (!gic_check_gicv2(alt + SZ_4K)) {
+ /*
+ * The first page was that of a GICv2, and
+ * the second was *something*. Let's trust it
+ * to be a GICv2, and update the mapping.
+ */
+ pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
+ &cpuif_res.start);
+ iounmap(*base);
+ *base = alt;
+ return true;
+ }
+
+ /*
+ * We detected *two* initial GICv2 pages in a
+ * row. Could be a GICv2 aliased over two 64kB
+ * pages. Update the resource, map the iospace, and
+ * pray.
+ */
+ iounmap(alt);
+ alt = ioremap(cpuif_res.start, SZ_128K);
+ if (!alt)
+ return false;
+ pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
+ &cpuif_res.start);
+ cpuif_res.end = cpuif_res.start + SZ_128K - 1;
+ iounmap(*base);
+ *base = alt;
+ }
+ if (resource_size(&cpuif_res) == SZ_128K) {
+ /*
+ * Verify that we have the first 4kB of a GICv2
+ * aliased over the first 64kB by checking the
+ * GICC_IIDR register on both ends.
+ */
+ if (!gic_check_gicv2(*base) ||
+ !gic_check_gicv2(*base + 0xf000))
+ return false;
+
+ /*
+ * Move the base up by 60kB, so that we have an 8kB
+ * contiguous region, which allows us to use GICC_DIR
+ * at its normal offset. Please pass me that bucket.
+ */
+ *base += 0xf000;
+ cpuif_res.start += 0xf000;
+ pr_warn("GIC: Adjusting CPU interface base to %pa\n",
+ &cpuif_res.start);
+ }
+
+ return true;
+}
+
+static bool gic_enable_rmw_access(void *data)
+{
+ /*
+ * The EMEV2 class of machines has a broken interconnect, and
+ * locks up on accesses that are less than 32bit. So far, only
+ * the affinity setting requires it.
+ */
+ if (of_machine_is_compatible("renesas,emev2")) {
+ static_branch_enable(&needs_rmw_access);
+ return true;
+ }
+
+ return false;
+}
+
+static const struct gic_quirk gic_quirks[] = {
+ {
+ .desc = "broken byte access",
+ .compatible = "arm,pl390",
+ .init = gic_enable_rmw_access,
+ },
+ { },
+};
+
+static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
+{
+ if (!gic || !node)
+ return -EINVAL;
+
+ gic->raw_dist_base = of_iomap(node, 0);
+ if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
+ goto error;
+
+ gic->raw_cpu_base = of_iomap(node, 1);
+ if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
+ goto error;
+
+ if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
+ gic->percpu_offset = 0;
+
+ gic_enable_of_quirks(node, gic_quirks, gic);
+
+ return 0;
+
+error:
+ gic_teardown(gic);
+
+ return -ENOMEM;
+}
+
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+ int ret;
+
+ if (!dev || !dev->of_node || !gic || !irq)
+ return -EINVAL;
+
+ *gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
+ if (!*gic)
+ return -ENOMEM;
+
+ ret = gic_of_setup(*gic, dev->of_node);
+ if (ret)
+ return ret;
+
+ ret = gic_init_bases(*gic, &dev->of_node->fwnode);
+ if (ret) {
+ gic_teardown(*gic);
+ return ret;
+ }
+
+ irq_domain_set_pm_device((*gic)->domain, dev);
+ irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
+
+ return 0;
+}
+
+static void __init gic_of_setup_kvm_info(struct device_node *node)
+{
+ int ret;
+ struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
+ struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
+
+ gic_v2_kvm_info.type = GIC_V2;
+
+ gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
+ if (!gic_v2_kvm_info.maint_irq)
+ return;
+
+ ret = of_address_to_resource(node, 2, vctrl_res);
+ if (ret)
+ return;
+
+ ret = of_address_to_resource(node, 3, vcpu_res);
+ if (ret)
+ return;
+
+ if (static_branch_likely(&supports_deactivate_key))
+ vgic_set_kvm_info(&gic_v2_kvm_info);
+}
+
+int __init
+gic_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct gic_chip_data *gic;
+ int irq, ret;
+
+ if (WARN_ON(!node))
+ return -ENODEV;
+
+ if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
+ return -EINVAL;
+
+ gic = &gic_data[gic_cnt];
+
+ ret = gic_of_setup(gic, node);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable split EOI/Deactivate if either HYP is not available
+ * or the CPU interface is too small.
+ */
+ if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
+ static_branch_disable(&supports_deactivate_key);
+
+ ret = __gic_init_bases(gic, &node->fwnode);
+ if (ret) {
+ gic_teardown(gic);
+ return ret;
+ }
+
+ if (!gic_cnt) {
+ gic_init_physaddr(node);
+ gic_of_setup_kvm_info(node);
+ }
+
+ if (parent) {
+ irq = irq_of_parse_and_map(node, 0);
+ gic_cascade_irq(gic_cnt, irq);
+ }
+
+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
+ gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);
+
+ gic_cnt++;
+ return 0;
+}
+IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
+IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
+IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
+IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
+IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
+IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
+#else
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static struct
+{
+ phys_addr_t cpu_phys_base;
+ u32 maint_irq;
+ int maint_irq_mode;
+ phys_addr_t vctrl_base;
+ phys_addr_t vcpu_base;
+} acpi_data __initdata;
+
+static int __init
+gic_acpi_parse_madt_cpu(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+ phys_addr_t gic_cpu_base;
+ static int cpu_base_assigned;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+
+ if (BAD_MADT_GICC_ENTRY(processor, end))
+ return -EINVAL;
+
+ /*
+ * There is no support for non-banked GICv1/2 registers in the ACPI spec.
+ * All CPU interface addresses have to be the same.
+ */
+ gic_cpu_base = processor->base_address;
+ if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
+ return -EINVAL;
+
+ acpi_data.cpu_phys_base = gic_cpu_base;
+ acpi_data.maint_irq = processor->vgic_interrupt;
+ acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
+ ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
+ acpi_data.vctrl_base = processor->gich_base_address;
+ acpi_data.vcpu_base = processor->gicv_base_address;
+
+ cpu_base_assigned = 1;
+ return 0;
+}
+
+/* The things you have to do to just *count* something... */
+static int __init acpi_dummy_func(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ return 0;
+}
+
+static bool __init acpi_gic_redist_is_present(void)
+{
+ return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
+ acpi_dummy_func, 0) > 0;
+}
+
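+/*
+ * Match MADT distributor entries that describe either an explicit GICv2,
+ * or a "no version" distributor on a system without redistributors
+ * (which rules out GICv3 and later).
+ */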
+static bool __init gic_validate_dist(struct acpi_subtable_header *header,
+ struct acpi_probe_entry *ape)
+{
+ struct acpi_madt_generic_distributor *dist;
+ dist = (struct acpi_madt_generic_distributor *)header;
+
+ return (dist->version == ape->driver_data &&
+ (dist->version != ACPI_MADT_GIC_VERSION_NONE ||
+ !acpi_gic_redist_is_present()));
+}
+
+#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
+#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
+#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
+#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
+
+static void __init gic_acpi_setup_kvm_info(void)
+{
+ int irq;
+ struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
+ struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
+
+ gic_v2_kvm_info.type = GIC_V2;
+
+ if (!acpi_data.vctrl_base)
+ return;
+
+ vctrl_res->flags = IORESOURCE_MEM;
+ vctrl_res->start = acpi_data.vctrl_base;
+ vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;
+
+ if (!acpi_data.vcpu_base)
+ return;
+
+ vcpu_res->flags = IORESOURCE_MEM;
+ vcpu_res->start = acpi_data.vcpu_base;
+ vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
+
+ irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
+ acpi_data.maint_irq_mode,
+ ACPI_ACTIVE_HIGH);
+ if (irq <= 0)
+ return;
+
+ gic_v2_kvm_info.maint_irq = irq;
+
+ vgic_set_kvm_info(&gic_v2_kvm_info);
+}
+
+static struct fwnode_handle *gsi_domain_handle;
+
+static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi)
+{
+ return gsi_domain_handle;
+}
+
+static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_distributor *dist;
+ struct gic_chip_data *gic = &gic_data[0];
+ int count, ret;
+
+ /* Collect CPU base addresses */
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ gic_acpi_parse_madt_cpu, 0);
+ if (count <= 0) {
+ pr_err("No valid GICC entries exist\n");
+ return -EINVAL;
+ }
+
+ gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
+ if (!gic->raw_cpu_base) {
+ pr_err("Unable to map GICC registers\n");
+ return -ENOMEM;
+ }
+
+ dist = (struct acpi_madt_generic_distributor *)header;
+ gic->raw_dist_base = ioremap(dist->base_address,
+ ACPI_GICV2_DIST_MEM_SIZE);
+ if (!gic->raw_dist_base) {
+ pr_err("Unable to map GICD registers\n");
+ gic_teardown(gic);
+ return -ENOMEM;
+ }
+
+ /*
+ * Disable split EOI/Deactivate if HYP is not available. ACPI
+ * guarantees that we'll always have a GICv2, so the CPU
+ * interface will always be the right size.
+ */
+ if (!is_hyp_mode_available())
+ static_branch_disable(&supports_deactivate_key);
+
+ /*
+ * Initialize GIC instance zero (no multi-GIC support).
+ */
+ gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
+ if (!gsi_domain_handle) {
+ pr_err("Unable to allocate domain handle\n");
+ gic_teardown(gic);
+ return -ENOMEM;
+ }
+
+ ret = __gic_init_bases(gic, gsi_domain_handle);
+ if (ret) {
+ pr_err("Failed to initialise GIC\n");
+ irq_domain_free_fwnode(gsi_domain_handle);
+ gic_teardown(gic);
+ return ret;
+ }
+
+ acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id);
+
+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
+ gicv2m_init(NULL, gic_data[0].domain);
+
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_acpi_setup_kvm_info();
+
+ return 0;
+}
+IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
+ gic_v2_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
+ gic_v2_acpi_init);
+#endif
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
new file mode 100644
index 000000000..513f6edbb
--- /dev/null
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MIPS Goldfish Programmable Interrupt Controller.
+ *
+ * Author: Miodrag Dinic <miodrag.dinic@mips.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define GFPIC_NR_IRQS 32
+
+/* 8..39 Cascaded Goldfish PIC interrupts */
+#define GFPIC_IRQ_BASE 8
+
+#define GFPIC_REG_IRQ_PENDING 0x04
+#define GFPIC_REG_IRQ_DISABLE_ALL 0x08
+#define GFPIC_REG_IRQ_DISABLE 0x0c
+#define GFPIC_REG_IRQ_ENABLE 0x10
+
+struct goldfish_pic_data {
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+};
+
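+/*
+ * Chained handler for the cascaded Goldfish PIC: service pending lines
+ * from highest to lowest until the pending register is drained.
+ */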
+static void goldfish_pic_cascade(struct irq_desc *desc)
+{
+ struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ u32 pending, hwirq;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING);
+ while (pending) {
+ hwirq = __fls(pending);
+ generic_handle_domain_irq(gfpic->irq_domain, hwirq);
+ pending &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static const struct irq_domain_ops goldfish_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init goldfish_pic_of_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct goldfish_pic_data *gfpic;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int parent_irq;
+ int ret = 0;
+
+ gfpic = kzalloc(sizeof(*gfpic), GFP_KERNEL);
+ if (!gfpic) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map parent IRQ!\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ gfpic->base = of_iomap(of_node, 0);
+ if (!gfpic->base) {
+ pr_err("Failed to map base address!\n");
+ ret = -ENOMEM;
+ goto out_unmap_irq;
+ }
+
+ /* Mask interrupts. */
+ writel(1, gfpic->base + GFPIC_REG_IRQ_DISABLE_ALL);
+
+ gc = irq_alloc_generic_chip("GFPIC", 1, GFPIC_IRQ_BASE, gfpic->base,
+ handle_level_irq);
+ if (!gc) {
+ pr_err("Failed to allocate chip structures!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ ct = gc->chip_types;
+ ct->regs.enable = GFPIC_REG_IRQ_ENABLE;
+ ct->regs.disable = GFPIC_REG_IRQ_DISABLE;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+
+ gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
+ GFPIC_IRQ_BASE, 0,
+ &goldfish_irq_domain_ops,
+ NULL);
+ if (!gfpic->irq_domain) {
+ pr_err("Failed to add irqdomain!\n");
+ ret = -ENOMEM;
+ goto out_destroy_generic_chip;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq,
+ goldfish_pic_cascade, gfpic);
+
+ pr_info("Successfully registered.\n");
+ return 0;
+
+out_destroy_generic_chip:
+ irq_destroy_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS),
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+out_iounmap:
+ iounmap(gfpic->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(gfpic);
+out_err:
+ pr_err("Failed to initialize! (errno = %d)\n", ret);
+ return ret;
+}
+
+IRQCHIP_DECLARE(google_gf_pic, "google,goldfish-pic", goldfish_pic_of_init);
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
new file mode 100644
index 000000000..46161f6ff
--- /dev/null
+++ b/drivers/irqchip/irq-hip04.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon HiP04 INTC
+ *
+ * Copyright (C) 2002-2014 ARM Limited.
+ * Copyright (c) 2013-2014 HiSilicon Ltd.
+ * Copyright (c) 2013-2014 Linaro Ltd.
+ *
+ * Interrupt architecture for the HIP04 INTC:
+ *
+ * o There is one Interrupt Distributor, which receives interrupts
+ * from system devices and sends them to the Interrupt Controllers.
+ *
+ * o There is one CPU Interface per CPU, which forwards interrupts sent
+ * by the Distributor, and interrupts generated locally, to the
+ * associated CPU. The base address of the CPU interface is usually
+ * aliased so that the same address points to different chips depending
+ * on the CPU it is accessed from.
+ *
+ * Note that IRQs 0-31 are special - they are local to each CPU.
+ * As such, the enable set/clear, pending set/clear and active bit
+ * registers are banked per-cpu for these sources.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+
+#include "irq-gic-common.h"
+
+#define HIP04_MAX_IRQS 510
+
+struct hip04_irq_data {
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+ struct irq_domain *domain;
+ unsigned int nr_irqs;
+};
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+/*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering. Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_HIP04_CPU_IF 16
+static u16 hip04_cpu_map[NR_HIP04_CPU_IF] __read_mostly;
+
+static struct hip04_irq_data hip04_data __read_mostly;
+
+static inline void __iomem *hip04_dist_base(struct irq_data *d)
+{
+ struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d);
+ return hip04_data->dist_base;
+}
+
+static inline void __iomem *hip04_cpu_base(struct irq_data *d)
+{
+ struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d);
+ return hip04_data->cpu_base;
+}
+
+static inline unsigned int hip04_irq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ */
+static void hip04_mask_irq(struct irq_data *d)
+{
+ u32 mask = 1 << (hip04_irq(d) % 32);
+
+ raw_spin_lock(&irq_controller_lock);
+ writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_CLEAR +
+ (hip04_irq(d) / 32) * 4);
+ raw_spin_unlock(&irq_controller_lock);
+}
+
+static void hip04_unmask_irq(struct irq_data *d)
+{
+ u32 mask = 1 << (hip04_irq(d) % 32);
+
+ raw_spin_lock(&irq_controller_lock);
+ writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_SET +
+ (hip04_irq(d) / 32) * 4);
+ raw_spin_unlock(&irq_controller_lock);
+}
+
+static void hip04_eoi_irq(struct irq_data *d)
+{
+ writel_relaxed(hip04_irq(d), hip04_cpu_base(d) + GIC_CPU_EOI);
+}
+
+static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ void __iomem *base = hip04_dist_base(d);
+ unsigned int irq = hip04_irq(d);
+ int ret;
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (irq < 16)
+ return -EINVAL;
+
+ /* SPIs have restrictions on the supported types */
+ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
+ type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ raw_spin_lock(&irq_controller_lock);
+
+ ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL);
+ if (ret && irq < 32) {
+ /* Misconfigured PPIs are usually not fatal */
+ pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16);
+ ret = 0;
+ }
+
+ raw_spin_unlock(&irq_controller_lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_SMP
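+/*
+ * Unlike a standard GIC, the HIP04 distributor packs two 16-bit CPU
+ * target fields into each 32-bit GIC_DIST_TARGET word, hence the
+ * "% 2" / "* 16" arithmetic below.
+ */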
+static int hip04_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ void __iomem *reg;
+ unsigned int cpu, shift = (hip04_irq(d) % 2) * 16;
+ u32 val, mask, bit;
+
+ if (!force)
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask_val);
+
+ if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ raw_spin_lock(&irq_controller_lock);
+ reg = hip04_dist_base(d) + GIC_DIST_TARGET + ((hip04_irq(d) * 2) & ~3);
+ mask = 0xffff << shift;
+ bit = hip04_cpu_map[cpu] << shift;
+ val = readl_relaxed(reg) & ~mask;
+ writel_relaxed(val | bit, reg);
+ raw_spin_unlock(&irq_controller_lock);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK;
+}
+
+static void hip04_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
+{
+ int cpu;
+ unsigned long flags, map = 0;
+
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= hip04_cpu_map[cpu];
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before they observe us issuing the IPI.
+ */
+ dmb(ishst);
+
+ /* this always happens on GIC0 */
+ writel_relaxed(map << 8 | d->hwirq, hip04_data.dist_base + GIC_DIST_SOFTINT);
+
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
+
+static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
+{
+ u32 irqstat, irqnr;
+ void __iomem *cpu_base = hip04_data.cpu_base;
+
+ do {
+ irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+ irqnr = irqstat & GICC_IAR_INT_ID_MASK;
+
+ if (irqnr <= HIP04_MAX_IRQS)
+ generic_handle_domain_irq(hip04_data.domain, irqnr);
+ } while (irqnr > HIP04_MAX_IRQS);
+}
+
+static struct irq_chip hip04_irq_chip = {
+ .name = "HIP04 INTC",
+ .irq_mask = hip04_mask_irq,
+ .irq_unmask = hip04_unmask_irq,
+ .irq_eoi = hip04_eoi_irq,
+ .irq_set_type = hip04_irq_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = hip04_irq_set_affinity,
+ .ipi_send_mask = hip04_ipi_send_mask,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
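+/*
+ * Read back the banked GIC_DIST_TARGET words for the private interrupts
+ * to discover which CPU interface bit the distributor assigned to the
+ * calling CPU.
+ */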
+static u16 hip04_get_cpumask(struct hip04_irq_data *intc)
+{
+ void __iomem *base = intc->dist_base;
+ u32 mask, i;
+
+ for (i = mask = 0; i < 32; i += 2) {
+ mask = readl_relaxed(base + GIC_DIST_TARGET + i * 2);
+ mask |= mask >> 16;
+ if (mask)
+ break;
+ }
+
+ if (!mask)
+ pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+ return mask;
+}
+
+static void __init hip04_irq_dist_init(struct hip04_irq_data *intc)
+{
+ unsigned int i;
+ u32 cpumask;
+ unsigned int nr_irqs = intc->nr_irqs;
+ void __iomem *base = intc->dist_base;
+
+ writel_relaxed(0, base + GIC_DIST_CTRL);
+
+ /*
+ * Set all global interrupts to this CPU only.
+ */
+ cpumask = hip04_get_cpumask(intc);
+ cpumask |= cpumask << 16;
+ for (i = 32; i < nr_irqs; i += 2)
+ writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3));
+
+ gic_dist_config(base, nr_irqs, NULL);
+
+ writel_relaxed(1, base + GIC_DIST_CTRL);
+}
+
+static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
+{
+ void __iomem *dist_base = intc->dist_base;
+ void __iomem *base = intc->cpu_base;
+ unsigned int cpu_mask, cpu = smp_processor_id();
+ int i;
+
+ /*
+ * Get what the GIC says our CPU mask is.
+ */
+ BUG_ON(cpu >= NR_HIP04_CPU_IF);
+ cpu_mask = hip04_get_cpumask(intc);
+ hip04_cpu_map[cpu] = cpu_mask;
+
+ /*
+ * Clear our mask from the other map entries in case they're
+ * still undefined.
+ */
+ for (i = 0; i < NR_HIP04_CPU_IF; i++)
+ if (i != cpu)
+ hip04_cpu_map[i] &= ~cpu_mask;
+
+ gic_cpu_config(dist_base, 32, NULL);
+
+ writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
+ writel_relaxed(1, base + GIC_CPU_CTRL);
+}
+
+static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (hw < 32) {
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &hip04_irq_chip,
+ handle_percpu_devid_irq);
+ } else {
+ irq_set_chip_and_handler(irq, &hip04_irq_chip,
+ handle_fasteoi_irq);
+ irq_set_probe(irq);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+ }
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static int hip04_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (irq_domain_get_of_node(d) != controller)
+ return -EINVAL;
+ if (intsize == 1 && intspec[0] < 16) {
+ *out_hwirq = intspec[0];
+ *out_type = IRQ_TYPE_EDGE_RISING;
+ return 0;
+ }
+ if (intsize < 3)
+ return -EINVAL;
+
+ /* Get the interrupt number and add 16 to skip over SGIs */
+ *out_hwirq = intspec[1] + 16;
+
+ /* For SPIs, we need to add 16 more to get the irq ID number */
+ if (!intspec[0])
+ *out_hwirq += 16;
+
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static int hip04_irq_starting_cpu(unsigned int cpu)
+{
+ hip04_irq_cpu_init(&hip04_data);
+ return 0;
+}
+
+static const struct irq_domain_ops hip04_irq_domain_ops = {
+ .map = hip04_irq_domain_map,
+ .xlate = hip04_irq_domain_xlate,
+};
+
+static int __init
+hip04_of_init(struct device_node *node, struct device_node *parent)
+{
+ int nr_irqs, irq_base, i;
+
+ if (WARN_ON(!node))
+ return -ENODEV;
+
+ hip04_data.dist_base = of_iomap(node, 0);
+ WARN(!hip04_data.dist_base, "unable to map hip04 intc dist registers\n");
+
+ hip04_data.cpu_base = of_iomap(node, 1);
+ WARN(!hip04_data.cpu_base, "unable to map hip04 intc cpu registers\n");
+
+ /*
+ * Initialize the CPU interface map to all CPUs.
+ * It will be refined as each CPU probes its ID.
+ */
+ for (i = 0; i < NR_HIP04_CPU_IF; i++)
+ hip04_cpu_map[i] = 0xffff;
+
+ /*
+ * Find out how many interrupts are supported.
+ * The HIP04 INTC only supports up to 510 interrupt sources.
+ */
+ nr_irqs = readl_relaxed(hip04_data.dist_base + GIC_DIST_CTR) & 0x1f;
+ nr_irqs = (nr_irqs + 1) * 32;
+ if (nr_irqs > HIP04_MAX_IRQS)
+ nr_irqs = HIP04_MAX_IRQS;
+ hip04_data.nr_irqs = nr_irqs;
+
+ irq_base = irq_alloc_descs(-1, 0, nr_irqs, numa_node_id());
+ if (irq_base < 0) {
+ pr_err("failed to allocate IRQ numbers\n");
+ return -EINVAL;
+ }
+
+ hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
+ 0,
+ &hip04_irq_domain_ops,
+ &hip04_data);
+ if (WARN_ON(!hip04_data.domain))
+ return -EINVAL;
+
+#ifdef CONFIG_SMP
+ set_smp_ipi_range(irq_base, 16);
+#endif
+ set_handle_irq(hip04_handle_irq);
+
+ hip04_irq_dist_init(&hip04_data);
+ cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "irqchip/hip04:starting",
+ hip04_irq_starting_cpu, NULL);
+ return 0;
+}
+IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
new file mode 100644
index 000000000..b70ce0d3c
--- /dev/null
+++ b/drivers/irqchip/irq-i8259.c
@@ -0,0 +1,361 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Code to handle x86 style IRQs plus some generic interrupt stuff.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include <linux/irq.h>
+
+#include <asm/i8259.h>
+#include <asm/io.h>
+
+/*
+ * This is the 'legacy' 8259A Programmable Interrupt Controller,
+ * present in the majority of PC/AT boxes.
+ * plus some generic x86-specific things, if generic specifics make
+ * any sense at all.
+ * This file should become arch/i386/kernel/irq.c when the old irq.c
+ * moves to arch-independent land.
+ */
+
+static int i8259A_auto_eoi = -1;
+DEFINE_RAW_SPINLOCK(i8259A_lock);
+static void disable_8259A_irq(struct irq_data *d);
+static void enable_8259A_irq(struct irq_data *d);
+static void mask_and_ack_8259A(struct irq_data *d);
+static void init_8259A(int auto_eoi);
+static int (*i8259_poll)(void) = i8259_irq;
+
+static struct irq_chip i8259A_chip = {
+ .name = "XT-PIC",
+ .irq_mask = disable_8259A_irq,
+ .irq_disable = disable_8259A_irq,
+ .irq_unmask = enable_8259A_irq,
+ .irq_mask_ack = mask_and_ack_8259A,
+};
+
+/*
+ * 8259A PIC functions to handle ISA devices:
+ */
+
+void i8259_set_poll(int (*poll)(void))
+{
+ i8259_poll = poll;
+}
+
+/*
+ * This contains the irq mask for both 8259A irq controllers,
+ */
+static unsigned int cached_irq_mask = 0xffff;
+
+#define cached_master_mask (cached_irq_mask)
+#define cached_slave_mask (cached_irq_mask >> 8)
+
+static void disable_8259A_irq(struct irq_data *d)
+{
+ unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ mask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ cached_irq_mask |= mask;
+ if (irq & 8)
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ else
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+static void enable_8259A_irq(struct irq_data *d)
+{
+ unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ mask = ~(1 << irq);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ cached_irq_mask &= mask;
+ if (irq & 8)
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ else
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+void make_8259A_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+ enable_irq(irq);
+}
+
+/*
+ * This function assumes to be called rarely. Switching between
+ * 8259A registers is slow.
+ * This has to be protected by the irq controller spinlock
+ * before being called.
+ */
+static inline int i8259A_irq_real(unsigned int irq)
+{
+ int value;
+ int irqmask = 1 << irq;
+
+ if (irq < 8) {
+ outb(0x0B, PIC_MASTER_CMD); /* ISR register */
+ value = inb(PIC_MASTER_CMD) & irqmask;
+ outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */
+ return value;
+ }
+ outb(0x0B, PIC_SLAVE_CMD); /* ISR register */
+ value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+ outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */
+ return value;
+}
+
+/*
+ * Careful! The 8259A is a fragile beast, it pretty
+ * much _has_ to be done exactly like this (mask it
+ * first, _then_ send the EOI, and the order of EOI
+ * to the two 8259s is important!
+ */
+static void mask_and_ack_8259A(struct irq_data *d)
+{
+ unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ irqmask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ /*
+ * Lightweight spurious IRQ detection. We do not want
+ * to overdo spurious IRQ handling - it's usually a sign
+ * of hardware problems, so we only do the checks we can
+ * do without slowing down good hardware unnecessarily.
+ *
+ * Note that IRQ7 and IRQ15 (the two spurious IRQs
+ * usually resulting from the 8259A-1|2 PICs) occur
+ * even if the IRQ is masked in the 8259A. Thus we
+ * can check spurious 8259A IRQs without doing the
+ * quite slow i8259A_irq_real() call for every IRQ.
+ * This does not cover 100% of spurious interrupts,
+ * but should be enough to warn the user that there
+ * is something bad going on ...
+ */
+ if (cached_irq_mask & irqmask)
+ goto spurious_8259A_irq;
+ cached_irq_mask |= irqmask;
+
+handle_real_irq:
+ if (irq & 8) {
+ inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+ outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+ } else {
+ inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI' to master */
+ }
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+ return;
+
+spurious_8259A_irq:
+ /*
+ * this is the slow path - should happen rarely.
+ */
+ if (i8259A_irq_real(irq))
+ /*
+ * oops, the IRQ _is_ in service according to the
+ * 8259A - not spurious, go handle it.
+ */
+ goto handle_real_irq;
+
+ {
+ static int spurious_irq_mask;
+ /*
+ * At this point we can be sure the IRQ is spurious,
+ * lets ACK and report it. [once per IRQ]
+ */
+ if (!(spurious_irq_mask & irqmask)) {
+ printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+ atomic_inc(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+ * simpler for us.
+ */
+ goto handle_real_irq;
+ }
+}
+
+static void i8259A_resume(void)
+{
+ if (i8259A_auto_eoi >= 0)
+ init_8259A(i8259A_auto_eoi);
+}
+
+static void i8259A_shutdown(void)
+{
+ /* Put the i8259A into a quiescent state that
+ * the kernel initialization code can get it
+ * out of.
+ */
+ if (i8259A_auto_eoi >= 0) {
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
+ }
+}
+
+static struct syscore_ops i8259_syscore_ops = {
+ .resume = i8259A_resume,
+ .shutdown = i8259A_shutdown,
+};
+
+static void init_8259A(int auto_eoi)
+{
+ unsigned long flags;
+
+ i8259A_auto_eoi = auto_eoi;
+
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
+
+ /*
+ * outb_p - this has to work on a wide range of PC hardware.
+ */
+ outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
+ outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+ outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */
+ if (auto_eoi) /* master does Auto EOI */
+ outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+ else /* master expects normal EOI */
+ outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
+
+ outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */
+ outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+ outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */
+ outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
+ if (auto_eoi)
+ /*
+ * In AEOI mode we just have to mask the interrupt
+ * when acking.
+ */
+ i8259A_chip.irq_mask_ack = disable_8259A_irq;
+ else
+ i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
+
+ udelay(100); /* wait for 8259A to initialize */
+
+ outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+ outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
+
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+static struct resource pic1_io_resource = {
+ .name = "pic1",
+ .start = PIC_MASTER_CMD,
+ .end = PIC_MASTER_IMR,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+};
+
+static struct resource pic2_io_resource = {
+ .name = "pic2",
+ .start = PIC_SLAVE_CMD,
+ .end = PIC_SLAVE_IMR,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+};
+
+static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
+ irq_set_probe(virq);
+ return 0;
+}
+
+static const struct irq_domain_ops i8259A_ops = {
+ .map = i8259A_irq_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+/*
+ * On systems with i8259-style interrupt controllers we assume, for
+ * driver compatibility reasons, that interrupts 0 - 15 are the i8259
+ * interrupts even if the hardware uses a different interrupt numbering.
+ */
+struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
+{
+ /*
+ * PIC_CASCADE_IR is cascade interrupt to second interrupt controller
+ */
+ int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR;
+ struct irq_domain *domain;
+
+ insert_resource(&ioport_resource, &pic1_io_resource);
+ insert_resource(&ioport_resource, &pic2_io_resource);
+
+ init_8259A(0);
+
+ domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
+ &i8259A_ops, NULL);
+ if (!domain)
+ panic("Failed to add i8259 IRQ domain");
+
+ if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to register cascade interrupt\n");
+ register_syscore_ops(&i8259_syscore_ops);
+ return domain;
+}
+
+void __init init_i8259_irqs(void)
+{
+ __init_i8259_irqs(NULL);
+}
+
+static void i8259_irq_dispatch(struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ int hwirq = i8259_poll();
+
+ if (hwirq < 0)
+ return;
+
+ generic_handle_domain_irq(domain, hwirq);
+}
+
+int __init i8259_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ unsigned int parent_irq;
+
+ domain = __init_i8259_irqs(node);
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map i8259 parent IRQ\n");
+ irq_domain_remove(domain);
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch,
+ domain);
+ return 0;
+}
+IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);
diff --git a/drivers/irqchip/irq-idt3243x.c b/drivers/irqchip/irq-idt3243x.c
new file mode 100644
index 000000000..0732a0e9a
--- /dev/null
+++ b/drivers/irqchip/irq-idt3243x.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for IDT/Renesas 79RC3243x Interrupt Controller.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define IDT_PIC_NR_IRQS 32
+
+#define IDT_PIC_IRQ_PEND 0x00
+#define IDT_PIC_IRQ_MASK 0x08
+
+struct idt_pic_data {
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ struct irq_chip_generic *gc;
+};
+
+static void idt_irq_dispatch(struct irq_desc *desc)
+{
+ struct idt_pic_data *idtpic = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ u32 pending, hwirq;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = irq_reg_readl(idtpic->gc, IDT_PIC_IRQ_PEND);
+ pending &= ~idtpic->gc->mask_cache;
+ while (pending) {
+ hwirq = __fls(pending);
+ generic_handle_domain_irq(idtpic->irq_domain, hwirq);
+ pending &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static int idt_pic_init(struct device_node *of_node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ struct idt_pic_data *idtpic;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int parent_irq;
+ int ret = 0;
+
+ idtpic = kzalloc(sizeof(*idtpic), GFP_KERNEL);
+ if (!idtpic) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map parent IRQ!\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ idtpic->base = of_iomap(of_node, 0);
+ if (!idtpic->base) {
+ pr_err("Failed to map base address!\n");
+ ret = -ENOMEM;
+ goto out_unmap_irq;
+ }
+
+ domain = irq_domain_add_linear(of_node, IDT_PIC_NR_IRQS,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("Failed to add irqdomain!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+ idtpic->irq_domain = domain;
+
+ ret = irq_alloc_domain_generic_chips(domain, 32, 1, "IDTPIC",
+ handle_level_irq, 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+ if (ret)
+ goto out_domain_remove;
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = idtpic->base;
+ gc->private = idtpic;
+
+ ct = gc->chip_types;
+ ct->regs.mask = IDT_PIC_IRQ_MASK;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ idtpic->gc = gc;
+
+ /* Mask interrupts. */
+ writel(0xffffffff, idtpic->base + IDT_PIC_IRQ_MASK);
+ gc->mask_cache = 0xffffffff;
+
+ irq_set_chained_handler_and_data(parent_irq,
+ idt_irq_dispatch, idtpic);
+
+ return 0;
+
+out_domain_remove:
+ irq_domain_remove(domain);
+out_iounmap:
+ iounmap(idtpic->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(idtpic);
+out_err:
+ pr_err("Failed to initialize! (errno = %d)\n", ret);
+ return ret;
+}
+
+IRQCHIP_DECLARE(idt_pic, "idt,32434-pic", idt_pic_init);
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
new file mode 100644
index 000000000..5831be454
--- /dev/null
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG PowerDown Controller (PDC)
+ *
+ * Copyright 2010-2013 Imagination Technologies Ltd.
+ *
+ * Exposes the syswake and PDC peripheral wake interrupts to the system.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* PDC interrupt register numbers */
+
+#define PDC_IRQ_STATUS 0x310
+#define PDC_IRQ_ENABLE 0x314
+#define PDC_IRQ_CLEAR 0x318
+#define PDC_IRQ_ROUTE 0x31c
+#define PDC_SYS_WAKE_BASE 0x330
+#define PDC_SYS_WAKE_STRIDE 0x8
+#define PDC_SYS_WAKE_CONFIG_BASE 0x334
+#define PDC_SYS_WAKE_CONFIG_STRIDE 0x8
+
+/* PDC interrupt register field masks */
+
+#define PDC_IRQ_SYS3 0x08
+#define PDC_IRQ_SYS2 0x04
+#define PDC_IRQ_SYS1 0x02
+#define PDC_IRQ_SYS0 0x01
+#define PDC_IRQ_ROUTE_WU_EN_SYS3 0x08000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS2 0x04000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS1 0x02000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS0 0x01000000
+#define PDC_IRQ_ROUTE_WU_EN_WD 0x00040000
+#define PDC_IRQ_ROUTE_WU_EN_IR 0x00020000
+#define PDC_IRQ_ROUTE_WU_EN_RTC 0x00010000
+#define PDC_IRQ_ROUTE_EXT_EN_SYS3 0x00000800
+#define PDC_IRQ_ROUTE_EXT_EN_SYS2 0x00000400
+#define PDC_IRQ_ROUTE_EXT_EN_SYS1 0x00000200
+#define PDC_IRQ_ROUTE_EXT_EN_SYS0 0x00000100
+#define PDC_IRQ_ROUTE_EXT_EN_WD 0x00000004
+#define PDC_IRQ_ROUTE_EXT_EN_IR 0x00000002
+#define PDC_IRQ_ROUTE_EXT_EN_RTC 0x00000001
+#define PDC_SYS_WAKE_RESET 0x00000010
+#define PDC_SYS_WAKE_INT_MODE 0x0000000e
+#define PDC_SYS_WAKE_INT_MODE_SHIFT 1
+#define PDC_SYS_WAKE_PIN_VAL 0x00000001
+
+/* PDC interrupt constants */
+
+#define PDC_SYS_WAKE_INT_LOW 0x0
+#define PDC_SYS_WAKE_INT_HIGH 0x1
+#define PDC_SYS_WAKE_INT_DOWN 0x2
+#define PDC_SYS_WAKE_INT_UP 0x3
+#define PDC_SYS_WAKE_INT_CHANGE 0x6
+#define PDC_SYS_WAKE_INT_NONE 0x4
+
+/**
+ * struct pdc_intc_priv - private pdc interrupt data.
+ * @nr_perips: Number of peripheral interrupt signals.
+ * @nr_syswakes: Number of syswake signals.
+ * @perip_irqs: List of peripheral IRQ numbers handled.
+ * @syswake_irq: Shared PDC syswake IRQ number.
+ * @domain: IRQ domain for PDC peripheral and syswake IRQs.
+ * @pdc_base: Base of PDC registers.
+ * @irq_route: Cached version of PDC_IRQ_ROUTE register.
+ * @lock: Lock to protect the PDC syswake registers and the cached
+ * values of those registers in this struct.
+ */
+struct pdc_intc_priv {
+ unsigned int nr_perips;
+ unsigned int nr_syswakes;
+ unsigned int *perip_irqs;
+ unsigned int syswake_irq;
+ struct irq_domain *domain;
+ void __iomem *pdc_base;
+
+ u32 irq_route;
+ raw_spinlock_t lock;
+};
+
+static void pdc_write(struct pdc_intc_priv *priv, unsigned int reg_offs,
+ unsigned int data)
+{
+ iowrite32(data, priv->pdc_base + reg_offs);
+}
+
+static unsigned int pdc_read(struct pdc_intc_priv *priv,
+ unsigned int reg_offs)
+{
+ return ioread32(priv->pdc_base + reg_offs);
+}
+
+/* Generic IRQ callbacks */
+
+#define SYS0_HWIRQ 8
+
+static unsigned int hwirq_is_syswake(irq_hw_number_t hw)
+{
+ return hw >= SYS0_HWIRQ;
+}
+
+static unsigned int hwirq_to_syswake(irq_hw_number_t hw)
+{
+ return hw - SYS0_HWIRQ;
+}
+
+static irq_hw_number_t syswake_to_hwirq(unsigned int syswake)
+{
+ return SYS0_HWIRQ + syswake;
+}
+
+static struct pdc_intc_priv *irqd_to_priv(struct irq_data *data)
+{
+ return (struct pdc_intc_priv *)data->domain->host_data;
+}
+
+/*
+ * perip_irq_mask() and perip_irq_unmask() use IRQ_ROUTE which also contains
+ * wake bits, therefore we cannot use the generic irqchip mask callbacks as they
+ * cache the mask.
+ */
+
+static void perip_irq_mask(struct irq_data *data)
+{
+ struct pdc_intc_priv *priv = irqd_to_priv(data);
+
+ raw_spin_lock(&priv->lock);
+ priv->irq_route &= ~data->mask;
+ pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+ raw_spin_unlock(&priv->lock);
+}
+
+static void perip_irq_unmask(struct irq_data *data)
+{
+ struct pdc_intc_priv *priv = irqd_to_priv(data);
+
+ raw_spin_lock(&priv->lock);
+ priv->irq_route |= data->mask;
+ pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+ raw_spin_unlock(&priv->lock);
+}
+
+static int syswake_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct pdc_intc_priv *priv = irqd_to_priv(data);
+ unsigned int syswake = hwirq_to_syswake(data->hwirq);
+ unsigned int irq_mode;
+ unsigned int soc_sys_wake_regoff, soc_sys_wake;
+
+ /* translate to syswake IRQ mode */
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_mode = PDC_SYS_WAKE_INT_CHANGE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_mode = PDC_SYS_WAKE_INT_UP;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_mode = PDC_SYS_WAKE_INT_DOWN;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_mode = PDC_SYS_WAKE_INT_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_mode = PDC_SYS_WAKE_INT_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ raw_spin_lock(&priv->lock);
+
+ /* set the IRQ mode */
+ soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + syswake*PDC_SYS_WAKE_STRIDE;
+ soc_sys_wake = pdc_read(priv, soc_sys_wake_regoff);
+ soc_sys_wake &= ~PDC_SYS_WAKE_INT_MODE;
+ soc_sys_wake |= irq_mode << PDC_SYS_WAKE_INT_MODE_SHIFT;
+ pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
+
+ /* and update the handler */
+ irq_setup_alt_chip(data, flow_type);
+
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* applies to both peripheral and syswake interrupts */
+static int pdc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct pdc_intc_priv *priv = irqd_to_priv(data);
+ irq_hw_number_t hw = data->hwirq;
+ unsigned int mask = (1 << 16) << hw;
+ unsigned int dst_irq;
+
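+ /* The wake-enable bits occupy the upper half of PDC_IRQ_ROUTE. */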
+ raw_spin_lock(&priv->lock);
+ if (on)
+ priv->irq_route |= mask;
+ else
+ priv->irq_route &= ~mask;
+ pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+ raw_spin_unlock(&priv->lock);
+
+ /* control the destination IRQ wakeup too for standby mode */
+ if (hwirq_is_syswake(hw))
+ dst_irq = priv->syswake_irq;
+ else
+ dst_irq = priv->perip_irqs[hw];
+ irq_set_irq_wake(dst_irq, on);
+
+ return 0;
+}
+
+static void pdc_intc_perip_isr(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct pdc_intc_priv *priv;
+ unsigned int i;
+
+ priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
+
+ /* find the peripheral number */
+ for (i = 0; i < priv->nr_perips; ++i)
+ if (irq == priv->perip_irqs[i])
+ goto found;
+
+ /* should never get here */
+ return;
+found:
+
+ /* pass on the interrupt */
+ generic_handle_domain_irq(priv->domain, i);
+}
+
+static void pdc_intc_syswake_isr(struct irq_desc *desc)
+{
+ struct pdc_intc_priv *priv;
+ unsigned int syswake;
+ unsigned int status;
+
+ priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
+
+ status = pdc_read(priv, PDC_IRQ_STATUS) &
+ pdc_read(priv, PDC_IRQ_ENABLE);
+ status &= (1 << priv->nr_syswakes) - 1;
+
+ for (syswake = 0; status; status >>= 1, ++syswake) {
+ /* Has this sys_wake triggered? */
+ if (!(status & 1))
+ continue;
+
+ generic_handle_domain_irq(priv->domain, syswake_to_hwirq(syswake));
+ }
+}
+
+static void pdc_intc_setup(struct pdc_intc_priv *priv)
+{
+ int i;
+ unsigned int soc_sys_wake_regoff;
+ unsigned int soc_sys_wake;
+
+ /*
+ * Mask all syswake interrupts before routing, or we could receive an
+ * interrupt before we're ready to handle it.
+ */
+ pdc_write(priv, PDC_IRQ_ENABLE, 0);
+
+ /*
+ * Enable routing of all syswakes
+ * Disable all wake sources
+ */
+ priv->irq_route = ((PDC_IRQ_ROUTE_EXT_EN_SYS0 << priv->nr_syswakes) -
+ PDC_IRQ_ROUTE_EXT_EN_SYS0);
+ pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+
+ /* Initialise syswake IRQ */
+ for (i = 0; i < priv->nr_syswakes; ++i) {
+ /* set the IRQ mode to none */
+ soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + i*PDC_SYS_WAKE_STRIDE;
+ soc_sys_wake = PDC_SYS_WAKE_INT_NONE
+ << PDC_SYS_WAKE_INT_MODE_SHIFT;
+ pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
+ }
+}
+
+static int pdc_intc_probe(struct platform_device *pdev)
+{
+ struct pdc_intc_priv *priv;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res_regs;
+ struct irq_chip_generic *gc;
+ unsigned int i;
+ int irq, ret;
+ u32 val;
+
+ if (!node)
+ return -ENOENT;
+
+ /* Get registers */
+ res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_regs == NULL) {
+ dev_err(&pdev->dev, "cannot find registers resource\n");
+ return -ENOENT;
+ }
+
+ /* Allocate driver data */
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ raw_spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ /* Ioremap the registers */
+ priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
+ resource_size(res_regs));
+ if (!priv->pdc_base)
+ return -EIO;
+
+ /* Get number of peripherals */
+ ret = of_property_read_u32(node, "num-perips", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "No num-perips node property found\n");
+ return -EINVAL;
+ }
+ if (val > SYS0_HWIRQ) {
+ dev_err(&pdev->dev, "num-perips (%u) out of range\n", val);
+ return -EINVAL;
+ }
+ priv->nr_perips = val;
+
+ /* Get number of syswakes */
+ ret = of_property_read_u32(node, "num-syswakes", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "No num-syswakes node property found\n");
+ return -EINVAL;
+ }
+ if (val > SYS0_HWIRQ) {
+ dev_err(&pdev->dev, "num-syswakes (%u) out of range\n", val);
+ return -EINVAL;
+ }
+ priv->nr_syswakes = val;
+
+ /* Get peripheral IRQ numbers */
+ priv->perip_irqs = devm_kcalloc(&pdev->dev, priv->nr_perips,
+ sizeof(*priv->perip_irqs), GFP_KERNEL);
+ if (!priv->perip_irqs)
+ return -ENOMEM;
+ for (i = 0; i < priv->nr_perips; ++i) {
+ irq = platform_get_irq(pdev, 1 + i);
+ if (irq < 0)
+ return irq;
+ priv->perip_irqs[i] = irq;
+ }
+ /* check if too many were provided */
+ if (platform_get_irq(pdev, 1 + i) >= 0) {
+ dev_err(&pdev->dev, "surplus perip IRQs detected\n");
+ return -EINVAL;
+ }
+
+ /* Get syswake IRQ number */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ priv->syswake_irq = irq;
+
+ /* Set up an IRQ domain */
+ priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops,
+ priv);
+ if (unlikely(!priv->domain)) {
+ dev_err(&pdev->dev, "cannot add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Set up 2 generic irq chips with 2 chip types.
+ * The first one for peripheral irqs (only 1 chip type used)
+ * The second one for syswake irqs (edge and level chip types)
+ */
+ ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc",
+ handle_level_irq, 0, 0,
+ IRQ_GC_INIT_NESTED_LOCK);
+ if (ret)
+ goto err_generic;
+
+ /* peripheral interrupt chip */
+
+ gc = irq_get_domain_generic_chip(priv->domain, 0);
+ gc->unused = ~(BIT(priv->nr_perips) - 1);
+ gc->reg_base = priv->pdc_base;
+ /*
+ * IRQ_ROUTE contains wake bits, so we can't use the generic versions as
+ * they cache the mask
+ */
+ gc->chip_types[0].regs.mask = PDC_IRQ_ROUTE;
+ gc->chip_types[0].chip.irq_mask = perip_irq_mask;
+ gc->chip_types[0].chip.irq_unmask = perip_irq_unmask;
+ gc->chip_types[0].chip.irq_set_wake = pdc_irq_set_wake;
+
+ /* syswake interrupt chip */
+
+ gc = irq_get_domain_generic_chip(priv->domain, 8);
+ gc->unused = ~(BIT(priv->nr_syswakes) - 1);
+ gc->reg_base = priv->pdc_base;
+
+ /* edge interrupts */
+ gc->chip_types[0].type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types[0].handler = handle_edge_irq;
+ gc->chip_types[0].regs.ack = PDC_IRQ_CLEAR;
+ gc->chip_types[0].regs.mask = PDC_IRQ_ENABLE;
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_set_type = syswake_irq_set_type;
+ gc->chip_types[0].chip.irq_set_wake = pdc_irq_set_wake;
+ /* for standby we pass on to the shared syswake IRQ */
+ gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ /* level interrupts */
+ gc->chip_types[1].type = IRQ_TYPE_LEVEL_MASK;
+ gc->chip_types[1].handler = handle_level_irq;
+ gc->chip_types[1].regs.ack = PDC_IRQ_CLEAR;
+ gc->chip_types[1].regs.mask = PDC_IRQ_ENABLE;
+ gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[1].chip.irq_set_type = syswake_irq_set_type;
+ gc->chip_types[1].chip.irq_set_wake = pdc_irq_set_wake;
+ /* for standby we pass on to the shared syswake IRQ */
+ gc->chip_types[1].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ /* Set up the hardware to enable interrupt routing */
+ pdc_intc_setup(priv);
+
+ /* Setup chained handlers for the peripheral IRQs */
+ for (i = 0; i < priv->nr_perips; ++i) {
+ irq = priv->perip_irqs[i];
+ irq_set_chained_handler_and_data(irq, pdc_intc_perip_isr,
+ priv);
+ }
+
+ /* Setup chained handler for the syswake IRQ */
+ irq_set_chained_handler_and_data(priv->syswake_irq,
+ pdc_intc_syswake_isr, priv);
+
+ dev_info(&pdev->dev,
+ "PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
+ priv->nr_perips,
+ priv->nr_syswakes);
+
+ return 0;
+err_generic:
+ irq_domain_remove(priv->domain);
+ return ret;
+}
+
+static int pdc_intc_remove(struct platform_device *pdev)
+{
+ struct pdc_intc_priv *priv = platform_get_drvdata(pdev);
+
+ irq_domain_remove(priv->domain);
+ return 0;
+}
+
+static const struct of_device_id pdc_intc_match[] = {
+ { .compatible = "img,pdc-intc" },
+ {}
+};
+
+static struct platform_driver pdc_intc_driver = {
+ .driver = {
+ .name = "pdc-intc",
+ .of_match_table = pdc_intc_match,
+ },
+ .probe = pdc_intc_probe,
+ .remove = pdc_intc_remove,
+};
+
+static int __init pdc_intc_init(void)
+{
+ return platform_driver_register(&pdc_intc_driver);
+}
+core_initcall(pdc_intc_init);
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
new file mode 100644
index 000000000..b9c22f764
--- /dev/null
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/syscore_ops.h>
+
+#define IMR_NUM 4
+#define GPC_MAX_IRQS (IMR_NUM * 32)
+
+#define GPC_IMR1_CORE0 0x30
+#define GPC_IMR1_CORE1 0x40
+#define GPC_IMR1_CORE2 0x1c0
+#define GPC_IMR1_CORE3 0x1d0
+
+
+struct gpcv2_irqchip_data {
+ struct raw_spinlock rlock;
+ void __iomem *gpc_base;
+ u32 wakeup_sources[IMR_NUM];
+ u32 saved_irq_mask[IMR_NUM];
+ u32 cpu2wakeup;
+};
+
+static struct gpcv2_irqchip_data *imx_gpcv2_instance __ro_after_init;
+
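+/*
+ * Return the address of the i-th IMR register of the wake-up CPU: the four
+ * 32-bit IMR registers sit at consecutive word offsets from cpu2wakeup
+ * (GPC_IMR1_CORE0 by default), register i covering hwirqs i*32..i*32+31.
+ */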
+static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
+{
+ return cd->gpc_base + cd->cpu2wakeup + i * 4;
+}
+
+static int gpcv2_wakeup_source_save(void)
+{
+ struct gpcv2_irqchip_data *cd;
+ void __iomem *reg;
+ int i;
+
+ cd = imx_gpcv2_instance;
+ if (!cd)
+ return 0;
+
+ for (i = 0; i < IMR_NUM; i++) {
+ reg = gpcv2_idx_to_reg(cd, i);
+ cd->saved_irq_mask[i] = readl_relaxed(reg);
+ writel_relaxed(cd->wakeup_sources[i], reg);
+ }
+
+ return 0;
+}
+
+static void gpcv2_wakeup_source_restore(void)
+{
+ struct gpcv2_irqchip_data *cd;
+ int i;
+
+ cd = imx_gpcv2_instance;
+ if (!cd)
+ return;
+
+ for (i = 0; i < IMR_NUM; i++)
+ writel_relaxed(cd->saved_irq_mask[i], gpcv2_idx_to_reg(cd, i));
+}
+
+static struct syscore_ops imx_gpcv2_syscore_ops = {
+ .suspend = gpcv2_wakeup_source_save,
+ .resume = gpcv2_wakeup_source_restore,
+};
+
+static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpcv2_irqchip_data *cd = d->chip_data;
+ unsigned int idx = d->hwirq / 32;
+ unsigned long flags;
+ u32 mask, val;
+
+ raw_spin_lock_irqsave(&cd->rlock, flags);
+ mask = BIT(d->hwirq % 32);
+ val = cd->wakeup_sources[idx];
+
+ cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask);
+ raw_spin_unlock_irqrestore(&cd->rlock, flags);
+
+ /*
+ * Do *not* call into the parent, as the GIC doesn't have any
+ * wake-up facility...
+ */
+
+ return 0;
+}
+
+static void imx_gpcv2_irq_unmask(struct irq_data *d)
+{
+ struct gpcv2_irqchip_data *cd = d->chip_data;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock(&cd->rlock);
+ reg = gpcv2_idx_to_reg(cd, d->hwirq / 32);
+ val = readl_relaxed(reg);
+ val &= ~BIT(d->hwirq % 32);
+ writel_relaxed(val, reg);
+ raw_spin_unlock(&cd->rlock);
+
+ irq_chip_unmask_parent(d);
+}
+
+static void imx_gpcv2_irq_mask(struct irq_data *d)
+{
+ struct gpcv2_irqchip_data *cd = d->chip_data;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock(&cd->rlock);
+ reg = gpcv2_idx_to_reg(cd, d->hwirq / 32);
+ val = readl_relaxed(reg);
+ val |= BIT(d->hwirq % 32);
+ writel_relaxed(val, reg);
+ raw_spin_unlock(&cd->rlock);
+
+ irq_chip_mask_parent(d);
+}
+
+static struct irq_chip gpcv2_irqchip_data_chip = {
+ .name = "GPCv2",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = imx_gpcv2_irq_mask,
+ .irq_unmask = imx_gpcv2_irq_unmask,
+ .irq_set_wake = imx_gpcv2_irq_set_wake,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int imx_gpcv2_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (fwspec->param[0] != 0)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int err;
+ int i;
+
+ err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type);
+ if (err)
+ return err;
+
+ if (hwirq >= GPC_MAX_IRQS)
+ return -EINVAL;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
+ &gpcv2_irqchip_data_chip, domain->host_data);
+ }
+
+ parent_fwspec = *fwspec;
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
+ &parent_fwspec);
+}
+
+static const struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
+ .translate = imx_gpcv2_domain_translate,
+ .alloc = imx_gpcv2_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static const struct of_device_id gpcv2_of_match[] = {
+ { .compatible = "fsl,imx7d-gpc", .data = (const void *) 2 },
+ { .compatible = "fsl,imx8mq-gpc", .data = (const void *) 4 },
+ { /* END */ }
+};
+
+static int __init imx_gpcv2_irqchip_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct gpcv2_irqchip_data *cd;
+ const struct of_device_id *id;
+ unsigned long core_num;
+ int i;
+
+ if (!parent) {
+ pr_err("%pOF: no parent, giving up\n", node);
+ return -ENODEV;
+ }
+
+ id = of_match_node(gpcv2_of_match, node);
+ if (!id) {
+ pr_err("%pOF: unknown compatibility string\n", node);
+ return -ENODEV;
+ }
+
+ core_num = (unsigned long)id->data;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to get parent domain\n", node);
+ return -ENXIO;
+ }
+
+ cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&cd->rlock);
+
+ cd->gpc_base = of_iomap(node, 0);
+ if (!cd->gpc_base) {
+ pr_err("%pOF: unable to map gpc registers\n", node);
+ kfree(cd);
+ return -ENOMEM;
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
+ node, &gpcv2_irqchip_data_domain_ops, cd);
+ if (!domain) {
+ iounmap(cd->gpc_base);
+ kfree(cd);
+ return -ENOMEM;
+ }
+ irq_set_default_host(domain);
+
+ /* Initially mask all interrupts */
+ for (i = 0; i < IMR_NUM; i++) {
+ void __iomem *reg = cd->gpc_base + i * 4;
+
+ switch (core_num) {
+ case 4:
+ writel_relaxed(~0, reg + GPC_IMR1_CORE2);
+ writel_relaxed(~0, reg + GPC_IMR1_CORE3);
+ fallthrough;
+ case 2:
+ writel_relaxed(~0, reg + GPC_IMR1_CORE0);
+ writel_relaxed(~0, reg + GPC_IMR1_CORE1);
+ }
+ cd->wakeup_sources[i] = ~0;
+ }
+
+ /* Use CORE0 as the default CPU to be woken up by the GPC */
+ cd->cpu2wakeup = GPC_IMR1_CORE0;
+
+ /*
+ * Due to a hardware design flaw, the GPR interrupt (#32) must be kept
+ * unmasked during RUN mode to avoid entering DSM by mistake.
+ */
+ writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);
+
+ imx_gpcv2_instance = cd;
+ register_syscore_ops(&imx_gpcv2_syscore_ops);
+
+ /*
+ * Clear the OF_POPULATED flag set in of_irq_init so that
+ * later the GPC power domain driver will not be skipped.
+ */
+ of_node_clear_flag(node, OF_POPULATED);
+ return 0;
+}
+
+IRQCHIP_DECLARE(imx_gpcv2_imx7d, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
+IRQCHIP_DECLARE(imx_gpcv2_imx8mq, "fsl,imx8mq-gpc", imx_gpcv2_irqchip_init);
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
new file mode 100644
index 000000000..80aaea824
--- /dev/null
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2017 NXP
+
+/* INTMUX Block Diagram
+ *
+ * ________________
+ * interrupt source # 0 +---->| |
+ * | | |
+ * interrupt source # 1 +++-->| |
+ * ... | | | channel # 0 |--------->interrupt out # 0
+ * ... | | | |
+ * ... | | | |
+ * interrupt source # X-1 +++-->|________________|
+ * | | |
+ * | | |
+ * | | | ________________
+ * +---->| |
+ * | | | | |
+ * | +-->| |
+ * | | | | channel # 1 |--------->interrupt out # 1
+ * | | +>| |
+ * | | | | |
+ * | | | |________________|
+ * | | |
+ * | | |
+ * | | | ...
+ * | | | ...
+ * | | |
+ * | | | ________________
+ * +---->| |
+ * | | | |
+ * +-->| |
+ * | | channel # N |--------->interrupt out # N
+ * +>| |
+ * | |
+ * |________________|
+ *
+ *
+ * N: Interrupt Channel Instance Number (N=7)
+ * X: Interrupt Source Number for each channel (X=32)
+ *
+ * The INTMUX interrupt multiplexer has 8 channels; each channel receives 32
+ * interrupt sources and generates one interrupt output.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+
+#define CHANIER(n) (0x10 + (0x40 * n))
+#define CHANIPR(n) (0x20 + (0x40 * n))
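+/*
+ * Each channel has a 0x40-byte register window, e.g. channel 2's enable
+ * register (CHANIER) is at 0x90 and its pending register (CHANIPR) at 0xa0.
+ */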
+
+#define CHAN_MAX_NUM 0x8
+
+struct intmux_irqchip_data {
+ u32 saved_reg;
+ int chanidx;
+ int irq;
+ struct irq_domain *domain;
+};
+
+struct intmux_data {
+ raw_spinlock_t lock;
+ void __iomem *regs;
+ struct clk *ipg_clk;
+ int channum;
+ struct intmux_irqchip_data irqchip_data[];
+};
+
+static void imx_intmux_irq_mask(struct irq_data *d)
+{
+ struct intmux_irqchip_data *irqchip_data = d->chip_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ reg = data->regs + CHANIER(idx);
+ val = readl_relaxed(reg);
+ /* disable the interrupt source of this channel */
+ val &= ~BIT(d->hwirq);
+ writel_relaxed(val, reg);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void imx_intmux_irq_unmask(struct irq_data *d)
+{
+ struct intmux_irqchip_data *irqchip_data = d->chip_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ reg = data->regs + CHANIER(idx);
+ val = readl_relaxed(reg);
+ /* enable the interrupt source of this channel */
+ val |= BIT(d->hwirq);
+ writel_relaxed(val, reg);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static struct irq_chip imx_intmux_irq_chip __ro_after_init = {
+ .name = "intmux",
+ .irq_mask = imx_intmux_irq_mask,
+ .irq_unmask = imx_intmux_irq_unmask,
+};
+
+static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct intmux_irqchip_data *data = h->host_data;
+
+ irq_set_chip_data(irq, data);
+ irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ struct intmux_irqchip_data *irqchip_data = d->host_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+
+ /*
+ * two cells needed in interrupt specifier:
+ * the 1st cell: hw interrupt number
+ * the 2nd cell: channel index
+ */
+ if (WARN_ON(intsize != 2))
+ return -EINVAL;
+
+ if (WARN_ON(intspec[1] >= data->channum))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ struct intmux_irqchip_data *irqchip_data = d->host_data;
+
+ /* Not for us */
+ if (fwspec->fwnode != d->fwnode)
+ return false;
+
+ return irqchip_data->chanidx == fwspec->param[1];
+}
+
+static const struct irq_domain_ops imx_intmux_domain_ops = {
+ .map = imx_intmux_irq_map,
+ .xlate = imx_intmux_irq_xlate,
+ .select = imx_intmux_irq_select,
+};
+
+static void imx_intmux_irq_handler(struct irq_desc *desc)
+{
+ struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc);
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long irqstat;
+ int pos;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ /* read the interrupt source pending status of this channel */
+ irqstat = readl_relaxed(data->regs + CHANIPR(idx));
+
+ for_each_set_bit(pos, &irqstat, 32)
+ generic_handle_domain_irq(irqchip_data->domain, pos);
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+static int imx_intmux_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct irq_domain *domain;
+ struct intmux_data *data;
+ int channum;
+ int i, ret;
+
+ channum = platform_irq_count(pdev);
+ if (channum == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (channum > CHAN_MAX_NUM) {
+ dev_err(&pdev->dev, "supports up to %d multiplex channels\n",
+ CHAN_MAX_NUM);
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(&pdev->dev, struct_size(data, irqchip_data, channum), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize reg\n");
+ return PTR_ERR(data->regs);
+ }
+
+ data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(data->ipg_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk),
+ "failed to get ipg clk\n");
+
+ data->channum = channum;
+ raw_spin_lock_init(&data->lock);
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = clk_prepare_enable(data->ipg_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < channum; i++) {
+ data->irqchip_data[i].chanidx = i;
+
+ data->irqchip_data[i].irq = irq_of_parse_and_map(np, i);
+ if (data->irqchip_data[i].irq <= 0) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "failed to get irq\n");
+ goto out;
+ }
+
+ domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops,
+ &data->irqchip_data[i]);
+ if (!domain) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ goto out;
+ }
+ data->irqchip_data[i].domain = domain;
+ irq_domain_set_pm_device(domain, &pdev->dev);
+
+ /* disable all interrupt sources of this channel first */
+ writel_relaxed(0, data->regs + CHANIER(i));
+
+ irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
+ imx_intmux_irq_handler,
+ &data->irqchip_data[i]);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ /*
+ * Let pm_runtime_put() disable the clock.
+ * If CONFIG_PM is not enabled, the clock stays enabled.
+ */
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+out:
+ clk_disable_unprepare(data->ipg_clk);
+ return ret;
+}
+
+static int imx_intmux_remove(struct platform_device *pdev)
+{
+ struct intmux_data *data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < data->channum; i++) {
+ /* disable all interrupt sources of this channel */
+ writel_relaxed(0, data->regs + CHANIER(i));
+
+ irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
+ NULL, NULL);
+
+ irq_domain_remove(data->irqchip_data[i].domain);
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int imx_intmux_runtime_suspend(struct device *dev)
+{
+ struct intmux_data *data = dev_get_drvdata(dev);
+ struct intmux_irqchip_data *irqchip_data;
+ int i;
+
+ for (i = 0; i < data->channum; i++) {
+ irqchip_data = &data->irqchip_data[i];
+ irqchip_data->saved_reg = readl_relaxed(data->regs + CHANIER(i));
+ }
+
+ clk_disable_unprepare(data->ipg_clk);
+
+ return 0;
+}
+
+static int imx_intmux_runtime_resume(struct device *dev)
+{
+ struct intmux_data *data = dev_get_drvdata(dev);
+ struct intmux_irqchip_data *irqchip_data;
+ int ret, i;
+
+ ret = clk_prepare_enable(data->ipg_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < data->channum; i++) {
+ irqchip_data = &data->irqchip_data[i];
+ writel_relaxed(irqchip_data->saved_reg, data->regs + CHANIER(i));
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx_intmux_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(imx_intmux_runtime_suspend,
+ imx_intmux_runtime_resume, NULL)
+};
+
+static const struct of_device_id imx_intmux_id[] = {
+ { .compatible = "fsl,imx-intmux", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver imx_intmux_driver = {
+ .driver = {
+ .name = "imx-intmux",
+ .of_match_table = imx_intmux_id,
+ .pm = &imx_intmux_pm_ops,
+ },
+ .probe = imx_intmux_probe,
+ .remove = imx_intmux_remove,
+};
+builtin_platform_driver(imx_intmux_driver);
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
new file mode 100644
index 000000000..96230a04e
--- /dev/null
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2017 NXP
+ * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+
+#define CTRL_STRIDE_OFF(_t, _r) (_t * 4 * _r)
+#define CHANCTRL 0x0
+#define CHANMASK(n, t) (CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
+#define CHANSET(n, t) (CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
+#define CHANSTATUS(n, t) (CTRL_STRIDE_OFF(t, 2) + 0x4 * (n) + 0x4)
+#define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4)
+#define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 0x8)
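+/*
+ * The register banks are sized by the number of 32-bit mask words (t =
+ * reg_num): CHANCTRL at 0x0 is followed by t mask words, t set words and
+ * t status words, then CHAN_MINTDIS and CHAN_MASTRSTAT; e.g. with t = 2,
+ * CHANMASK(0, 2) = 0x4 and CHANSTATUS(0, 2) = 0x14.
+ */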
+
+#define CHAN_MAX_OUTPUT_INT 0x8
+
+struct irqsteer_data {
+ void __iomem *regs;
+ struct clk *ipg_clk;
+ int irq[CHAN_MAX_OUTPUT_INT];
+ int irq_count;
+ raw_spinlock_t lock;
+ int reg_num;
+ int channel;
+ struct irq_domain *domain;
+ u32 *saved_reg;
+};
+
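+/*
+ * The CHANMASK/CHANSTATUS words are ordered from the highest numbered input
+ * group down to the lowest, so e.g. with reg_num = 2 (64 inputs), hwirqs
+ * 0..31 live in register index 1 and hwirqs 32..63 in index 0.
+ */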
+static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
+ unsigned long irqnum)
+{
+ return (data->reg_num - irqnum / 32 - 1);
+}
+
+static void imx_irqsteer_irq_unmask(struct irq_data *d)
+{
+ struct irqsteer_data *data = d->chip_data;
+ int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
+ val |= BIT(d->hwirq % 32);
+ writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void imx_irqsteer_irq_mask(struct irq_data *d)
+{
+ struct irqsteer_data *data = d->chip_data;
+ int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
+ val &= ~BIT(d->hwirq % 32);
+ writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static const struct irq_chip imx_irqsteer_irq_chip = {
+ .name = "irqsteer",
+ .irq_mask = imx_irqsteer_irq_mask,
+ .irq_unmask = imx_irqsteer_irq_unmask,
+};
+
+static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ irq_set_chip_data(irq, h->host_data);
+ irq_set_chip_and_handler(irq, &imx_irqsteer_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops imx_irqsteer_domain_ops = {
+ .map = imx_irqsteer_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
+{
+ int i;
+
+ for (i = 0; i < data->irq_count; i++) {
+ if (data->irq[i] == irq)
+ return i * 64;
+ }
+
+ return -EINVAL;
+}
+
+static void imx_irqsteer_irq_handler(struct irq_desc *desc)
+{
+ struct irqsteer_data *data = irq_desc_get_handler_data(desc);
+ int hwirq;
+ int irq, i;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ irq = irq_desc_get_irq(desc);
+ hwirq = imx_irqsteer_get_hwirq_base(data, irq);
+ if (hwirq < 0) {
+ pr_warn("%s: unable to get hwirq base for irq %d\n",
+ __func__, irq);
+ return;
+ }
+
+ for (i = 0; i < 2; i++, hwirq += 32) {
+ int idx = imx_irqsteer_get_reg_index(data, hwirq);
+ unsigned long irqmap;
+ int pos;
+
+ if (hwirq >= data->reg_num * 32)
+ break;
+
+ irqmap = readl_relaxed(data->regs +
+ CHANSTATUS(idx, data->reg_num));
+
+ for_each_set_bit(pos, &irqmap, 32)
+ generic_handle_domain_irq(data->domain, pos + hwirq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+static int imx_irqsteer_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct irqsteer_data *data;
+ u32 irqs_num;
+ int i, ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize reg\n");
+ return PTR_ERR(data->regs);
+ }
+
+ data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(data->ipg_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk),
+ "failed to get ipg clk\n");
+
+ raw_spin_lock_init(&data->lock);
+
+ ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(np, "fsl,channel", &data->channel);
+ if (ret)
+ return ret;
+
+ /*
+ * There is one output irq for each group of 64 inputs.
+ * One register bit map can represent 32 input interrupts.
+ */
+ data->irq_count = DIV_ROUND_UP(irqs_num, 64);
+ data->reg_num = irqs_num / 32;
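+ /*
+ * E.g. fsl,num-irqs = 512 gives 8 output interrupts and 16 32-bit
+ * mask/status registers.
+ */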
+
+ if (IS_ENABLED(CONFIG_PM)) {
+ data->saved_reg = devm_kzalloc(&pdev->dev,
+ sizeof(u32) * data->reg_num,
+ GFP_KERNEL);
+ if (!data->saved_reg)
+ return -ENOMEM;
+ }
+
+ ret = clk_prepare_enable(data->ipg_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ /* steer all IRQs into configured channel */
+ writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
+
+ data->domain = irq_domain_add_linear(np, data->reg_num * 32,
+ &imx_irqsteer_domain_ops, data);
+ if (!data->domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ irq_domain_set_pm_device(data->domain, &pdev->dev);
+
+ if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < data->irq_count; i++) {
+ data->irq[i] = irq_of_parse_and_map(np, i);
+ if (!data->irq[i]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ irq_set_chained_handler_and_data(data->irq[i],
+ imx_irqsteer_irq_handler,
+ data);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+out:
+ clk_disable_unprepare(data->ipg_clk);
+ return ret;
+}
+
+static int imx_irqsteer_remove(struct platform_device *pdev)
+{
+ struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < irqsteer_data->irq_count; i++)
+ irq_set_chained_handler_and_data(irqsteer_data->irq[i],
+ NULL, NULL);
+
+ irq_domain_remove(irqsteer_data->domain);
+
+ clk_disable_unprepare(irqsteer_data->ipg_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void imx_irqsteer_save_regs(struct irqsteer_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->reg_num; i++)
+ data->saved_reg[i] = readl_relaxed(data->regs +
+ CHANMASK(i, data->reg_num));
+}
+
+static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
+{
+ int i;
+
+ writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
+ for (i = 0; i < data->reg_num; i++)
+ writel_relaxed(data->saved_reg[i],
+ data->regs + CHANMASK(i, data->reg_num));
+}
+
+static int imx_irqsteer_suspend(struct device *dev)
+{
+ struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);
+
+ imx_irqsteer_save_regs(irqsteer_data);
+ clk_disable_unprepare(irqsteer_data->ipg_clk);
+
+ return 0;
+}
+
+static int imx_irqsteer_resume(struct device *dev)
+{
+ struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(irqsteer_data->ipg_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+ imx_irqsteer_restore_regs(irqsteer_data);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx_irqsteer_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(imx_irqsteer_suspend,
+ imx_irqsteer_resume, NULL)
+};
+
+static const struct of_device_id imx_irqsteer_dt_ids[] = {
+ { .compatible = "fsl,imx-irqsteer", },
+ {},
+};
+
+static struct platform_driver imx_irqsteer_driver = {
+ .driver = {
+ .name = "imx-irqsteer",
+ .of_match_table = imx_irqsteer_dt_ids,
+ .pm = &imx_irqsteer_pm_ops,
+ },
+ .probe = imx_irqsteer_probe,
+ .remove = imx_irqsteer_remove,
+};
+builtin_platform_driver(imx_irqsteer_driver);
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
new file mode 100644
index 000000000..229039eda
--- /dev/null
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Freescale MU used as MSI controller
+ *
+ * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
+ * Copyright 2022 NXP
+ * Frank Li <Frank.Li@nxp.com>
+ * Peng Fan <peng.fan@nxp.com>
+ *
+ * Based on drivers/mailbox/imx-mailbox.c
+ */
+
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/spinlock.h>
+
+#define IMX_MU_CHANS 4
+
+enum imx_mu_xcr {
+ IMX_MU_GIER,
+ IMX_MU_GCR,
+ IMX_MU_TCR,
+ IMX_MU_RCR,
+ IMX_MU_xCR_MAX,
+};
+
+enum imx_mu_xsr {
+ IMX_MU_SR,
+ IMX_MU_GSR,
+ IMX_MU_TSR,
+ IMX_MU_RSR,
+ IMX_MU_xSR_MAX
+};
+
+enum imx_mu_type {
+ IMX_MU_V2 = BIT(1),
+};
+
+/* Receive Interrupt Enable */
+#define IMX_MU_xCR_RIEn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xSR_RFn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
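+/*
+ * On MU v2 (the i.MX8ULP config here) each receive channel has its own
+ * enable/status bit (BIT(x)); on older MUs the four channels are packed
+ * into bits 27..24 in reverse order, i.e. channel 0 uses bit 27.
+ */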
+
+struct imx_mu_dcfg {
+ enum imx_mu_type type;
+ u32 xTR; /* Transmit Register0 */
+ u32 xRR; /* Receive Register0 */
+ u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
+ u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
+};
+
+struct imx_mu_msi {
+ raw_spinlock_t lock;
+ struct irq_domain *msi_domain;
+ void __iomem *regs;
+ phys_addr_t msiir_addr;
+ const struct imx_mu_dcfg *cfg;
+ unsigned long used;
+ struct clk *clk;
+};
+
+static void imx_mu_write(struct imx_mu_msi *msi_data, u32 val, u32 offs)
+{
+ iowrite32(val, msi_data->regs + offs);
+}
+
+static u32 imx_mu_read(struct imx_mu_msi *msi_data, u32 offs)
+{
+ return ioread32(msi_data->regs + offs);
+}
+
+static u32 imx_mu_xcr_rmw(struct imx_mu_msi *msi_data, enum imx_mu_xcr type, u32 set, u32 clr)
+{
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&msi_data->lock, flags);
+ val = imx_mu_read(msi_data, msi_data->cfg->xCR[type]);
+ val &= ~clr;
+ val |= set;
+ imx_mu_write(msi_data, val, msi_data->cfg->xCR[type]);
+ raw_spin_unlock_irqrestore(&msi_data->lock, flags);
+
+ return val;
+}
+
+static void imx_mu_msi_parent_mask_irq(struct irq_data *data)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(msi_data, data->hwirq));
+}
+
+static void imx_mu_msi_parent_unmask_irq(struct irq_data *data)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, IMX_MU_xCR_RIEn(msi_data, data->hwirq), 0);
+}
+
+static void imx_mu_msi_parent_ack_irq(struct irq_data *data)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4);
+}
+
+static struct irq_chip imx_mu_msi_irq_chip = {
+ .name = "MU-MSI",
+ .irq_ack = irq_chip_ack_parent,
+};
+
+static struct msi_domain_ops imx_mu_msi_irq_ops = {
+};
+
+static struct msi_domain_info imx_mu_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &imx_mu_msi_irq_ops,
+ .chip = &imx_mu_msi_irq_chip,
+};
+
+static void imx_mu_msi_parent_compose_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
+ u64 addr = msi_data->msiir_addr + 4 * data->hwirq;
+
+ msg->address_hi = upper_32_bits(addr);
+ msg->address_lo = lower_32_bits(addr);
+ msg->data = data->hwirq;
+}
+
+static int imx_mu_msi_parent_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip imx_mu_msi_parent_chip = {
+ .name = "MU",
+ .irq_mask = imx_mu_msi_parent_mask_irq,
+ .irq_unmask = imx_mu_msi_parent_unmask_irq,
+ .irq_ack = imx_mu_msi_parent_ack_irq,
+ .irq_compose_msi_msg = imx_mu_msi_parent_compose_msg,
+ .irq_set_affinity = imx_mu_msi_parent_set_affinity,
+};
+
+static int imx_mu_msi_domain_irq_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct imx_mu_msi *msi_data = domain->host_data;
+ unsigned long flags;
+ int pos, err = 0;
+
+ WARN_ON(nr_irqs != 1);
+
+ raw_spin_lock_irqsave(&msi_data->lock, flags);
+ pos = find_first_zero_bit(&msi_data->used, IMX_MU_CHANS);
+ if (pos < IMX_MU_CHANS)
+ __set_bit(pos, &msi_data->used);
+ else
+ err = -ENOSPC;
+ raw_spin_unlock_irqrestore(&msi_data->lock, flags);
+
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, pos,
+ &imx_mu_msi_parent_chip, msi_data,
+ handle_edge_irq, NULL, NULL);
+ return 0;
+}
+
+static void imx_mu_msi_domain_irq_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&msi_data->lock, flags);
+ __clear_bit(d->hwirq, &msi_data->used);
+ raw_spin_unlock_irqrestore(&msi_data->lock, flags);
+}
+
+static const struct irq_domain_ops imx_mu_msi_domain_ops = {
+ .alloc = imx_mu_msi_domain_irq_alloc,
+ .free = imx_mu_msi_domain_irq_free,
+};
+
+static void imx_mu_msi_irq_handler(struct irq_desc *desc)
+{
+ struct imx_mu_msi *msi_data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 status;
+ int i;
+
+ status = imx_mu_read(msi_data, msi_data->cfg->xSR[IMX_MU_RSR]);
+
+ chained_irq_enter(chip, desc);
+ for (i = 0; i < IMX_MU_CHANS; i++) {
+ if (status & IMX_MU_xSR_RFn(msi_data, i))
+ generic_handle_domain_irq(msi_data->msi_domain, i);
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev)
+{
+ struct fwnode_handle *fwnodes = dev_fwnode(dev);
+ struct irq_domain *parent;
+
+ /* Initialize MSI domain parent */
+ parent = irq_domain_create_linear(fwnodes,
+ IMX_MU_CHANS,
+ &imx_mu_msi_domain_ops,
+ msi_data);
+ if (!parent) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ msi_data->msi_domain = platform_msi_create_irq_domain(fwnodes,
+ &imx_mu_msi_domain_info,
+ parent);
+
+ if (!msi_data->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
+
+ irq_domain_set_pm_device(msi_data->msi_domain, dev);
+
+ return 0;
+}
+
+/* Register offset of different version MU IP */
+static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
+ .type = 0,
+ .xTR = 0x0,
+ .xRR = 0x10,
+ .xSR = {
+ [IMX_MU_SR] = 0x20,
+ [IMX_MU_GSR] = 0x20,
+ [IMX_MU_TSR] = 0x20,
+ [IMX_MU_RSR] = 0x20,
+ },
+ .xCR = {
+ [IMX_MU_GIER] = 0x24,
+ [IMX_MU_GCR] = 0x24,
+ [IMX_MU_TCR] = 0x24,
+ [IMX_MU_RCR] = 0x24,
+ },
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
+ .type = 0,
+ .xTR = 0x20,
+ .xRR = 0x40,
+ .xSR = {
+ [IMX_MU_SR] = 0x60,
+ [IMX_MU_GSR] = 0x60,
+ [IMX_MU_TSR] = 0x60,
+ [IMX_MU_RSR] = 0x60,
+ },
+ .xCR = {
+ [IMX_MU_GIER] = 0x64,
+ [IMX_MU_GCR] = 0x64,
+ [IMX_MU_TCR] = 0x64,
+ [IMX_MU_RCR] = 0x64,
+ },
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
+ .type = IMX_MU_V2,
+ .xTR = 0x200,
+ .xRR = 0x280,
+ .xSR = {
+ [IMX_MU_SR] = 0xC,
+ [IMX_MU_GSR] = 0x118,
+ [IMX_MU_TSR] = 0x124,
+ [IMX_MU_RSR] = 0x12C,
+ },
+ .xCR = {
+ [IMX_MU_GIER] = 0x110,
+ [IMX_MU_GCR] = 0x114,
+ [IMX_MU_TCR] = 0x120,
+ [IMX_MU_RCR] = 0x128
+ },
+};
+
+static int __init imx_mu_of_init(struct device_node *dn,
+ struct device_node *parent,
+ const struct imx_mu_dcfg *cfg)
+{
+ struct platform_device *pdev = of_find_device_by_node(dn);
+ struct device_link *pd_link_a;
+ struct device_link *pd_link_b;
+ struct imx_mu_msi *msi_data;
+ struct resource *res;
+ struct device *pd_a;
+ struct device *pd_b;
+ struct device *dev;
+ int ret;
+ int irq;
+
+ dev = &pdev->dev;
+
+ msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
+ msi_data->cfg = cfg;
+
+ msi_data->regs = devm_platform_ioremap_resource_byname(pdev, "processor-a-side");
+ if (IS_ERR(msi_data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize 'regs'\n");
+ return PTR_ERR(msi_data->regs);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "processor-b-side");
+ if (!res)
+ return -EIO;
+
+ msi_data->msiir_addr = res->start + msi_data->cfg->xTR;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, msi_data);
+
+ msi_data->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(msi_data->clk))
+ return PTR_ERR(msi_data->clk);
+
+ pd_a = dev_pm_domain_attach_by_name(dev, "processor-a-side");
+ if (IS_ERR(pd_a))
+ return PTR_ERR(pd_a);
+
+ pd_b = dev_pm_domain_attach_by_name(dev, "processor-b-side");
+ if (IS_ERR(pd_b))
+ return PTR_ERR(pd_b);
+
+ pd_link_a = device_link_add(dev, pd_a,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+
+ if (!pd_link_a) {
+ dev_err(dev, "Failed to add device_link to mu a.\n");
+ goto err_pd_a;
+ }
+
+ pd_link_b = device_link_add(dev, pd_b,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+
+
+ if (!pd_link_b) {
+ dev_err(dev, "Failed to add device_link to mu a.\n");
+ goto err_pd_b;
+ }
+
+ ret = imx_mu_msi_domains_init(msi_data, dev);
+ if (ret)
+ goto err_dm_init;
+
+ pm_runtime_enable(dev);
+
+ irq_set_chained_handler_and_data(irq,
+ imx_mu_msi_irq_handler,
+ msi_data);
+
+ return 0;
+
+err_dm_init:
+ device_link_remove(dev, pd_b);
+err_pd_b:
+ device_link_remove(dev, pd_a);
+err_pd_a:
+ return -EINVAL;
+}
+
+static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
+{
+ struct imx_mu_msi *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
+{
+ struct imx_mu_msi *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ dev_err(dev, "failed to enable clock\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_mu_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
+ imx_mu_runtime_resume, NULL)
+};
+
+static int __init imx_mu_imx7ulp_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx7ulp);
+}
+
+static int __init imx_mu_imx6sx_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx6sx);
+}
+
+static int __init imx_mu_imx8ulp_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx8ulp);
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(imx_mu_msi)
+IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_of_init)
+IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_of_init)
+IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(imx_mu_msi, .pm = &imx_mu_pm_ops)
+
+
+MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
+MODULE_DESCRIPTION("Freescale MU MSI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
new file mode 100644
index 000000000..3363f83bd
--- /dev/null
+++ b/drivers/irqchip/irq-ingenic-tcu.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ47xx SoCs TCU IRQ driver
+ * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/mfd/ingenic-tcu.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+struct ingenic_tcu {
+ struct regmap *map;
+ struct clk *clk;
+ struct irq_domain *domain;
+ unsigned int nb_parent_irqs;
+ u32 parent_irqs[3];
+};
+
+static void ingenic_tcu_intc_cascade(struct irq_desc *desc)
+{
+ struct irq_chip *irq_chip = irq_data_get_irq_chip(&desc->irq_data);
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
+ struct regmap *map = gc->private;
+ uint32_t irq_reg, irq_mask;
+ unsigned long bits;
+ unsigned int i;
+
+ regmap_read(map, TCU_REG_TFR, &irq_reg);
+ regmap_read(map, TCU_REG_TMR, &irq_mask);
+
+ chained_irq_enter(irq_chip, desc);
+
+ irq_reg &= ~irq_mask;
+ bits = irq_reg;
+
+ for_each_set_bit(i, &bits, 32)
+ generic_handle_domain_irq(domain, i);
+
+ chained_irq_exit(irq_chip, desc);
+}
+
+static void ingenic_tcu_gc_unmask_enable_reg(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ struct regmap *map = gc->private;
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ regmap_write(map, ct->regs.ack, mask);
+ regmap_write(map, ct->regs.enable, mask);
+ *ct->mask_cache |= mask;
+ irq_gc_unlock(gc);
+}
+
+static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ struct regmap *map = gc->private;
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ regmap_write(map, ct->regs.disable, mask);
+ *ct->mask_cache &= ~mask;
+ irq_gc_unlock(gc);
+}
+
+static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ struct regmap *map = gc->private;
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ regmap_write(map, ct->regs.ack, mask);
+ regmap_write(map, ct->regs.disable, mask);
+ irq_gc_unlock(gc);
+}
+
+static int __init ingenic_tcu_irq_init(struct device_node *np,
+ struct device_node *parent)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ struct ingenic_tcu *tcu;
+ struct regmap *map;
+ unsigned int i;
+ int ret, irqs;
+
+ map = device_node_to_regmap(np);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ tcu = kzalloc(sizeof(*tcu), GFP_KERNEL);
+ if (!tcu)
+ return -ENOMEM;
+
+ tcu->map = map;
+
+ irqs = of_property_count_elems_of_size(np, "interrupts", sizeof(u32));
+ if (irqs < 0 || irqs > ARRAY_SIZE(tcu->parent_irqs)) {
+ pr_crit("%s: Invalid 'interrupts' property\n", __func__);
+ ret = -EINVAL;
+ goto err_free_tcu;
+ }
+
+ tcu->nb_parent_irqs = irqs;
+
+ tcu->domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops,
+ NULL);
+ if (!tcu->domain) {
+ ret = -ENOMEM;
+ goto err_free_tcu;
+ }
+
+ ret = irq_alloc_domain_generic_chips(tcu->domain, 32, 1, "TCU",
+ handle_level_irq, 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+ if (ret) {
+ pr_crit("%s: Invalid 'interrupts' property\n", __func__);
+ goto out_domain_remove;
+ }
+
+ gc = irq_get_domain_generic_chip(tcu->domain, 0);
+ ct = gc->chip_types;
+
+ gc->wake_enabled = IRQ_MSK(32);
+ gc->private = tcu->map;
+
+ ct->regs.disable = TCU_REG_TMSR;
+ ct->regs.enable = TCU_REG_TMCR;
+ ct->regs.ack = TCU_REG_TFCR;
+ ct->chip.irq_unmask = ingenic_tcu_gc_unmask_enable_reg;
+ ct->chip.irq_mask = ingenic_tcu_gc_mask_disable_reg;
+ ct->chip.irq_mask_ack = ingenic_tcu_gc_mask_disable_reg_and_ack;
+ ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
+
+ /* Mask all IRQs by default */
+ regmap_write(tcu->map, TCU_REG_TMSR, IRQ_MSK(32));
+
+ /*
+ * On JZ4740, timer 0 and timer 1 have their own interrupt line;
+ * timers 2-7 share one interrupt.
+ * On SoCs >= JZ4770, timer 5 has its own interrupt line;
+ * timers 0-4 and 6-7 share one single interrupt.
+ *
+ * To keep things simple, we just register the same handler to
+ * all parent interrupts. The handler will properly detect which
+ * channel fired the interrupt.
+ */
+ for (i = 0; i < irqs; i++) {
+ tcu->parent_irqs[i] = irq_of_parse_and_map(np, i);
+ if (!tcu->parent_irqs[i]) {
+ ret = -EINVAL;
+ goto out_unmap_irqs;
+ }
+
+ irq_set_chained_handler_and_data(tcu->parent_irqs[i],
+ ingenic_tcu_intc_cascade,
+ tcu->domain);
+ }
+
+ return 0;
+
+out_unmap_irqs:
+ for (; i > 0; i--)
+ irq_dispose_mapping(tcu->parent_irqs[i - 1]);
+out_domain_remove:
+ irq_domain_remove(tcu->domain);
+err_free_tcu:
+ kfree(tcu);
+ return ret;
+}
+IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
+IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
+IRQCHIP_DECLARE(jz4760_tcu_irq, "ingenic,jz4760-tcu", ingenic_tcu_irq_init);
+IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
+IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
new file mode 100644
index 000000000..cee839ca6
--- /dev/null
+++ b/drivers/irqchip/irq-ingenic.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * Ingenic XBurst platform IRQ support
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/timex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+
+struct ingenic_intc_data {
+ void __iomem *base;
+ struct irq_domain *domain;
+ unsigned num_chips;
+};
+
+#define JZ_REG_INTC_STATUS 0x00
+#define JZ_REG_INTC_MASK 0x04
+#define JZ_REG_INTC_SET_MASK 0x08
+#define JZ_REG_INTC_CLEAR_MASK 0x0c
+#define JZ_REG_INTC_PENDING 0x10
+#define CHIP_SIZE 0x20
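+/*
+ * Each bank of 32 interrupts repeats this register block at a CHIP_SIZE
+ * (0x20) stride, so bank i's registers start at intc->base + i * CHIP_SIZE.
+ */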
+
+static irqreturn_t intc_cascade(int irq, void *data)
+{
+ struct ingenic_intc_data *intc = irq_get_handler_data(irq);
+ struct irq_domain *domain = intc->domain;
+ struct irq_chip_generic *gc;
+ uint32_t pending;
+ unsigned i;
+
+ for (i = 0; i < intc->num_chips; i++) {
+ gc = irq_get_domain_generic_chip(domain, i * 32);
+
+ pending = irq_reg_readl(gc, JZ_REG_INTC_PENDING);
+ if (!pending)
+ continue;
+
+ while (pending) {
+ int bit = __fls(pending);
+
+ generic_handle_domain_irq(domain, bit + (i * 32));
+ pending &= ~BIT(bit);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init ingenic_intc_of_init(struct device_node *node,
+ unsigned num_chips)
+{
+ struct ingenic_intc_data *intc;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ struct irq_domain *domain;
+ int parent_irq, err = 0;
+ unsigned i;
+
+ intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+ if (!intc) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ err = irq_set_handler_data(parent_irq, intc);
+ if (err)
+ goto out_unmap_irq;
+
+ intc->num_chips = num_chips;
+ intc->base = of_iomap(node, 0);
+ if (!intc->base) {
+ err = -ENODEV;
+ goto out_unmap_irq;
+ }
+
+ domain = irq_domain_add_linear(node, num_chips * 32,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ err = -ENOMEM;
+ goto out_unmap_base;
+ }
+
+ intc->domain = domain;
+
+ err = irq_alloc_domain_generic_chips(domain, 32, 1, "INTC",
+ handle_level_irq, 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+ if (err)
+ goto out_domain_remove;
+
+ for (i = 0; i < num_chips; i++) {
+ gc = irq_get_domain_generic_chip(domain, i * 32);
+
+ gc->wake_enabled = IRQ_MSK(32);
+ gc->reg_base = intc->base + (i * CHIP_SIZE);
+
+ ct = gc->chip_types;
+ ct->regs.enable = JZ_REG_INTC_CLEAR_MASK;
+ ct->regs.disable = JZ_REG_INTC_SET_MASK;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
+ ct->chip.irq_set_wake = irq_gc_set_wake;
+ ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ /* Mask all irqs */
+ irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
+ }
+
+ if (request_irq(parent_irq, intc_cascade, IRQF_NO_SUSPEND,
+ "SoC intc cascade interrupt", NULL))
+ pr_err("Failed to register SoC intc cascade interrupt\n");
+ return 0;
+
+out_domain_remove:
+ irq_domain_remove(domain);
+out_unmap_base:
+ iounmap(intc->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(intc);
+out_err:
+ return err;
+}
+
+static int __init intc_1chip_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return ingenic_intc_of_init(node, 1);
+}
+IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init);
+IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init);
+
+static int __init intc_2chip_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return ingenic_intc_of_init(node, 2);
+}
+IRQCHIP_DECLARE(jz4760_intc, "ingenic,jz4760-intc", intc_2chip_of_init);
+IRQCHIP_DECLARE(jz4770_intc, "ingenic,jz4770-intc", intc_2chip_of_init);
+IRQCHIP_DECLARE(jz4775_intc, "ingenic,jz4775-intc", intc_2chip_of_init);
+IRQCHIP_DECLARE(jz4780_intc, "ingenic,jz4780-intc", intc_2chip_of_init);
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
new file mode 100644
index 000000000..5fba907b9
--- /dev/null
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * irqchip for the IXP4xx interrupt controller
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista, Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define IXP4XX_ICPR 0x00 /* Interrupt Status */
+#define IXP4XX_ICMR 0x04 /* Interrupt Enable */
+#define IXP4XX_ICLR 0x08 /* Interrupt IRQ/FIQ Select */
+#define IXP4XX_ICIP 0x0C /* IRQ Status */
+#define IXP4XX_ICFP 0x10 /* FIQ Status */
+#define IXP4XX_ICHR 0x14 /* Interrupt Priority */
+#define IXP4XX_ICIH 0x18 /* IRQ Highest Pri Int */
+#define IXP4XX_ICFH 0x1C /* FIQ Highest Pri Int */
+
+/* IXP43x and IXP46x-only */
+#define IXP4XX_ICPR2 0x20 /* Interrupt Status 2 */
+#define IXP4XX_ICMR2 0x24 /* Interrupt Enable 2 */
+#define IXP4XX_ICLR2 0x28 /* Interrupt IRQ/FIQ Select 2 */
+#define IXP4XX_ICIP2 0x2C /* IRQ Status */
+#define IXP4XX_ICFP2 0x30 /* FIQ Status */
+#define IXP4XX_ICEEN 0x34 /* Error High Pri Enable */
+
+/**
+ * struct ixp4xx_irq - state container for the IXP4xx IRQ controller
+ * @irqbase: IRQ controller memory base in virtual memory
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC (with 64 IRQs)
+ * @irqchip: irqchip for this instance
+ * @domain: IRQ domain for this instance
+ */
+struct ixp4xx_irq {
+ void __iomem *irqbase;
+ bool is_356;
+ struct irq_chip irqchip;
+ struct irq_domain *domain;
+};
+
+/* Local static state container */
+static struct ixp4xx_irq ixirq;
+
+/* GPIO Clocks */
+#define IXP4XX_GPIO_CLK_0 14
+#define IXP4XX_GPIO_CLK_1 15
+
+static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ /* All are level active high (asserted) here */
+ if (type != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ return 0;
+}
+
+static void ixp4xx_irq_mask(struct irq_data *d)
+{
+ struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ if (ixi->is_356 && d->hwirq >= 32) {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2);
+ val &= ~BIT(d->hwirq - 32);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2);
+ } else {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR);
+ val &= ~BIT(d->hwirq);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR);
+ }
+}
+
+/*
+ * Level triggered interrupts on GPIO lines can only be cleared when the
+ * interrupt condition disappears.
+ */
+static void ixp4xx_irq_unmask(struct irq_data *d)
+{
+ struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ if (ixi->is_356 && d->hwirq >= 32) {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2);
+ val |= BIT(d->hwirq - 32);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2);
+ } else {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR);
+ }
+}
+
+static asmlinkage void __exception_irq_entry
+ixp4xx_handle_irq(struct pt_regs *regs)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ unsigned long status;
+ int i;
+
+ status = __raw_readl(ixi->irqbase + IXP4XX_ICIP);
+ for_each_set_bit(i, &status, 32)
+ generic_handle_domain_irq(ixi->domain, i);
+
+ /*
+ * IXP465/IXP435 has an upper IRQ status register
+ */
+ if (ixi->is_356) {
+ status = __raw_readl(ixi->irqbase + IXP4XX_ICIP2);
+ for_each_set_bit(i, &status, 32)
+ generic_handle_domain_irq(ixi->domain, i + 32);
+ }
+}
+
+static int ixp4xx_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ixp4xx_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct ixp4xx_irq *ixi = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ int ret;
+ int i;
+
+ ret = ixp4xx_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ /*
+		 * TODO: once IXP4xx is device tree only, set handle_bad_irq
+		 * as the default handler and assume all consumers call
+		 * .set_type(), since the trigger type is provided in the
+		 * second cell of the device tree phandle.
+ */
+ irq_domain_set_info(d,
+ irq + i,
+ hwirq + i,
+ &ixi->irqchip,
+ ixi,
+ handle_level_irq,
+ NULL, NULL);
+ irq_set_probe(irq + i);
+ }
+
+ return 0;
+}
+
+/*
+ * This needs to be a hierarchical irqdomain to work well with the
+ * GPIO irqchip (which is lower in the hierarchy)
+ */
+static const struct irq_domain_ops ixp4xx_irqdomain_ops = {
+ .translate = ixp4xx_irq_domain_translate,
+ .alloc = ixp4xx_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+/**
+ * ixp4xx_irq_setup() - Common setup code for the IXP4xx interrupt controller
+ * @ixi: State container
+ * @irqbase: Virtual memory base for the interrupt controller
+ * @fwnode: Corresponding fwnode abstraction for this controller
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant
+ */
+static int __init ixp4xx_irq_setup(struct ixp4xx_irq *ixi,
+ void __iomem *irqbase,
+ struct fwnode_handle *fwnode,
+ bool is_356)
+{
+ int nr_irqs;
+
+ ixi->irqbase = irqbase;
+ ixi->is_356 = is_356;
+
+ /* Route all sources to IRQ instead of FIQ */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR);
+
+ /* Disable all interrupts */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR);
+
+ if (is_356) {
+ /* Route upper 32 sources to IRQ instead of FIQ */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR2);
+
+ /* Disable upper 32 interrupts */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR2);
+
+ nr_irqs = 64;
+ } else {
+ nr_irqs = 32;
+ }
+
+ ixi->irqchip.name = "IXP4xx";
+ ixi->irqchip.irq_mask = ixp4xx_irq_mask;
+ ixi->irqchip.irq_unmask = ixp4xx_irq_unmask;
+ ixi->irqchip.irq_set_type = ixp4xx_set_irq_type;
+
+ ixi->domain = irq_domain_create_linear(fwnode, nr_irqs,
+ &ixp4xx_irqdomain_ops,
+ ixi);
+ if (!ixi->domain) {
+ pr_crit("IXP4XX: can not add primary irqdomain\n");
+ return -ENODEV;
+ }
+
+ set_handle_irq(ixp4xx_handle_irq);
+
+ return 0;
+}
+
+static int __init ixp4xx_of_init_irq(struct device_node *np,
+ struct device_node *parent)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ void __iomem *base;
+ struct fwnode_handle *fwnode;
+ bool is_356;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4XX: could not ioremap interrupt controller\n");
+ return -ENODEV;
+ }
+ fwnode = of_node_to_fwnode(np);
+
+ /* These chip variants have 64 interrupts */
+ is_356 = of_device_is_compatible(np, "intel,ixp43x-interrupt") ||
+ of_device_is_compatible(np, "intel,ixp45x-interrupt") ||
+ of_device_is_compatible(np, "intel,ixp46x-interrupt");
+
+ ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356);
+ if (ret)
+ pr_crit("IXP4XX: failed to set up irqchip\n");
+
+ return ret;
+}
+IRQCHIP_DECLARE(ixp42x, "intel,ixp42x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp43x, "intel,ixp43x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp45x, "intel,ixp45x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp46x, "intel,ixp46x-interrupt",
+ ixp4xx_of_init_irq);
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
new file mode 100644
index 000000000..b9dcc8e78
--- /dev/null
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -0,0 +1,120 @@
+/*
+ * J-Core SoC AIC driver
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define JCORE_AIC_MAX_HWIRQ 127
+#define JCORE_AIC1_MIN_HWIRQ 16
+#define JCORE_AIC2_MIN_HWIRQ 64
+
+#define JCORE_AIC1_INTPRI_REG 8
+
+static struct irq_chip jcore_aic;
+
+/*
+ * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do
+ * not distinguish or use distinct irq number ranges for per-cpu event
+ * interrupts (timer, IPI). Since information to determine whether a
+ * particular irq number should be treated as per-cpu is not available
+ * at mapping time, we use a wrapper handler function which chooses
+ * the right handler at runtime based on whether IRQF_PERCPU was used
+ * when requesting the irq.
+ */
+
+static void handle_jcore_irq(struct irq_desc *desc)
+{
+ if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
+ handle_percpu_irq(desc);
+ else
+ handle_simple_irq(desc);
+}
+
+static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_chip *aic = d->host_data;
+
+ irq_set_chip_and_handler(irq, aic, handle_jcore_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops jcore_aic_irqdomain_ops = {
+ .map = jcore_aic_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void noop(struct irq_data *data)
+{
+}
+
+static int __init aic_irq_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ unsigned min_irq = JCORE_AIC2_MIN_HWIRQ;
+ unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1;
+ struct irq_domain *domain;
+ int ret;
+
+ pr_info("Initializing J-Core AIC\n");
+
+ /* AIC1 needs priority initialization to receive interrupts. */
+ if (of_device_is_compatible(node, "jcore,aic1")) {
+ unsigned cpu;
+
+ for_each_present_cpu(cpu) {
+ void __iomem *base = of_iomap(node, cpu);
+
+ if (!base) {
+ pr_err("Unable to map AIC for cpu %u\n", cpu);
+ return -ENOMEM;
+ }
+ __raw_writel(0xffffffff, base + JCORE_AIC1_INTPRI_REG);
+ iounmap(base);
+ }
+ min_irq = JCORE_AIC1_MIN_HWIRQ;
+ }
+
+ /*
+ * The irq chip framework requires either mask/unmask or enable/disable
+ * function pointers to be provided, but the hardware does not have any
+ * such mechanism; the only interrupt masking is at the cpu level and
+ * it affects all interrupts. We provide dummy mask/unmask. The hardware
+ * handles all interrupt control and clears pending status when the cpu
+ * accepts the interrupt.
+ */
+ jcore_aic.irq_mask = noop;
+ jcore_aic.irq_unmask = noop;
+ jcore_aic.name = "AIC";
+
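+	/* Pre-allocate descriptors so the legacy domain can keep a 1:1 hwirq-to-virq mapping */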
+ ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq,
+ of_node_to_nid(node));
+
+ if (ret < 0)
+ return ret;
+
+ domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
+ &jcore_aic_irqdomain_ops,
+ &jcore_aic);
+ if (!domain)
+ return -ENOMEM;
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(jcore_aic2, "jcore,aic2", aic_irq_of_init);
+IRQCHIP_DECLARE(jcore_aic1, "jcore,aic1", aic_irq_of_init);
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
new file mode 100644
index 000000000..ba9792e60
--- /dev/null
+++ b/drivers/irqchip/irq-keystone.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Texas Instruments Keystone IRQ controller IP driver
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc.
+ * Author: Sajesh Kumar Saran <sajesh@ti.com>
+ * Grygorii Strashko <grygorii.strashko@ti.com>
+ */
+
+#include <linux/irq.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* The source ID bits occupy bits 4 to 31 (28 bits total) */
+#define BIT_OFS 4
+#define KEYSTONE_N_IRQ (32 - BIT_OFS)
+
+struct keystone_irq_device {
+ struct device *dev;
+ struct irq_chip chip;
+ u32 mask;
+ int irq;
+ struct irq_domain *irqd;
+ struct regmap *devctrl_regs;
+ u32 devctrl_offset;
+ raw_spinlock_t wa_lock;
+};
+
+static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
+{
+ int ret;
+ u32 val = 0;
+
+ ret = regmap_read(kirq->devctrl_regs, kirq->devctrl_offset, &val);
+ if (ret < 0)
+ dev_dbg(kirq->dev, "irq read failed ret(%d)\n", ret);
+ return val;
+}
+
+static inline void
+keystone_irq_writel(struct keystone_irq_device *kirq, u32 value)
+{
+ int ret;
+
+ ret = regmap_write(kirq->devctrl_regs, kirq->devctrl_offset, value);
+ if (ret < 0)
+ dev_dbg(kirq->dev, "irq write failed ret(%d)\n", ret);
+}
+
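+/* Masking is software-only: keystone_irq_handler filters pending sources against kirq->mask */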
+static void keystone_irq_setmask(struct irq_data *d)
+{
+ struct keystone_irq_device *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->mask |= BIT(d->hwirq);
+ dev_dbg(kirq->dev, "mask %lu [%x]\n", d->hwirq, kirq->mask);
+}
+
+static void keystone_irq_unmask(struct irq_data *d)
+{
+ struct keystone_irq_device *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->mask &= ~BIT(d->hwirq);
+ dev_dbg(kirq->dev, "unmask %lu [%x]\n", d->hwirq, kirq->mask);
+}
+
+static void keystone_irq_ack(struct irq_data *d)
+{
+ /* nothing to do here */
+}
+
+static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
+{
+ struct keystone_irq_device *kirq = keystone_irq;
+ unsigned long wa_lock_flags;
+ unsigned long pending;
+ int src, err;
+
+ dev_dbg(kirq->dev, "start irq %d\n", irq);
+
+ pending = keystone_irq_readl(kirq);
+ keystone_irq_writel(kirq, pending);
+
+ dev_dbg(kirq->dev, "pending 0x%lx, mask 0x%x\n", pending, kirq->mask);
+
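+	/* Source IDs start at bit 4 (BIT_OFS); shift them down and drop software-masked sources */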
+ pending = (pending >> BIT_OFS) & ~kirq->mask;
+
+ dev_dbg(kirq->dev, "pending after mask 0x%lx\n", pending);
+
+ for (src = 0; src < KEYSTONE_N_IRQ; src++) {
+ if (BIT(src) & pending) {
+ raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
+ err = generic_handle_domain_irq(kirq->irqd, src);
+ raw_spin_unlock_irqrestore(&kirq->wa_lock,
+ wa_lock_flags);
+
+ if (err)
+ dev_warn_ratelimited(kirq->dev, "spurious irq detected hwirq %d\n",
+ src);
+ }
+ }
+
+ dev_dbg(kirq->dev, "end irq %d\n", irq);
+ return IRQ_HANDLED;
+}
+
+static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct keystone_irq_device *kirq = h->host_data;
+
+ irq_set_chip_data(virq, kirq);
+ irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq);
+ irq_set_probe(virq);
+ return 0;
+}
+
+static const struct irq_domain_ops keystone_irq_ops = {
+ .map = keystone_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int keystone_irq_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct keystone_irq_device *kirq;
+ int ret;
+
+ if (np == NULL)
+ return -EINVAL;
+
+ kirq = devm_kzalloc(dev, sizeof(*kirq), GFP_KERNEL);
+ if (!kirq)
+ return -ENOMEM;
+
+ kirq->devctrl_regs =
+ syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+ if (IS_ERR(kirq->devctrl_regs))
+ return PTR_ERR(kirq->devctrl_regs);
+
+ ret = of_property_read_u32_index(np, "ti,syscon-dev", 1,
+ &kirq->devctrl_offset);
+ if (ret) {
+ dev_err(dev, "couldn't read the devctrl_offset offset!\n");
+ return ret;
+ }
+
+ kirq->irq = platform_get_irq(pdev, 0);
+ if (kirq->irq < 0)
+ return kirq->irq;
+
+ kirq->dev = dev;
+ kirq->mask = ~0x0;
+ kirq->chip.name = "keystone-irq";
+ kirq->chip.irq_ack = keystone_irq_ack;
+ kirq->chip.irq_mask = keystone_irq_setmask;
+ kirq->chip.irq_unmask = keystone_irq_unmask;
+
+ kirq->irqd = irq_domain_add_linear(np, KEYSTONE_N_IRQ,
+ &keystone_irq_ops, kirq);
+ if (!kirq->irqd) {
+ dev_err(dev, "IRQ domain registration failed\n");
+ return -ENODEV;
+ }
+
+ raw_spin_lock_init(&kirq->wa_lock);
+
+ platform_set_drvdata(pdev, kirq);
+
+ ret = request_irq(kirq->irq, keystone_irq_handler,
+ 0, dev_name(dev), kirq);
+ if (ret) {
+ irq_domain_remove(kirq->irqd);
+ return ret;
+ }
+
+ /* clear all source bits */
+ keystone_irq_writel(kirq, ~0x0);
+
+ dev_info(dev, "irqchip registered, nr_irqs %u\n", KEYSTONE_N_IRQ);
+
+ return 0;
+}
+
+static int keystone_irq_remove(struct platform_device *pdev)
+{
+ struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
+ int hwirq;
+
+ free_irq(kirq->irq, kirq);
+
+ for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
+
+ irq_domain_remove(kirq->irqd);
+ return 0;
+}
+
+static const struct of_device_id keystone_irq_dt_ids[] = {
+ { .compatible = "ti,keystone-irq", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, keystone_irq_dt_ids);
+
+static struct platform_driver keystone_irq_device_driver = {
+ .probe = keystone_irq_probe,
+ .remove = keystone_irq_remove,
+ .driver = {
+ .name = "keystone_irq",
+ .of_match_table = of_match_ptr(keystone_irq_dt_ids),
+ }
+};
+
+module_platform_driver(keystone_irq_device_driver);
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_AUTHOR("Sajesh Kumar Saran");
+MODULE_AUTHOR("Grygorii Strashko");
+MODULE_DESCRIPTION("Keystone IRQ chip");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
new file mode 100644
index 000000000..fdec3e9cf
--- /dev/null
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+
+#include <asm/loongarch.h>
+#include <asm/setup.h>
+
+static struct irq_domain *irq_domain;
+struct fwnode_handle *cpuintc_handle;
+
+static u32 lpic_gsi_to_irq(u32 gsi)
+{
+	/* Only GSIs in the PCH range need this fallback translation on LoongArch. */
+ if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
+ return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
+
+ return 0;
+}
+
+static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
+{
+ int id;
+ struct fwnode_handle *domain_handle = NULL;
+
+ switch (gsi) {
+ case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
+ if (liointc_handle)
+ domain_handle = liointc_handle;
+ break;
+
+ case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
+ if (pch_lpc_handle)
+ domain_handle = pch_lpc_handle;
+ break;
+
+ case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
+ id = find_pch_pic(gsi);
+ if (id >= 0 && pch_pic_handle[id])
+ domain_handle = pch_pic_handle[id];
+ break;
+ }
+
+ return domain_handle;
+}
+
+static void mask_loongarch_irq(struct irq_data *d)
+{
+ clear_csr_ecfg(ECFGF(d->hwirq));
+}
+
+static void unmask_loongarch_irq(struct irq_data *d)
+{
+ set_csr_ecfg(ECFGF(d->hwirq));
+}
+
+static struct irq_chip cpu_irq_controller = {
+ .name = "CPUINTC",
+ .irq_mask = mask_loongarch_irq,
+ .irq_unmask = unmask_loongarch_irq,
+};
+
+static void handle_cpu_irq(struct pt_regs *regs)
+{
+ int hwirq;
+ unsigned int estat = read_csr_estat() & CSR_ESTAT_IS;
+
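+	/* ffs() is 1-based, so the ESTAT bit index is hwirq - 1 */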
+ while ((hwirq = ffs(estat))) {
+ estat &= ~BIT(hwirq - 1);
+ generic_handle_domain_irq(irq_domain, hwirq - 1);
+ }
+}
+
+static int loongarch_cpu_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_noprobe(irq);
+ irq_set_chip_and_handler(irq, &cpu_irq_controller, handle_percpu_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
+ .map = loongarch_cpu_intc_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init liointc_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header;
+
+ return liointc_acpi_init(irq_domain, liointc_entry);
+}
+
+static int __init eiointc_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header;
+
+ return eiointc_acpi_init(irq_domain, eiointc_entry);
+}
+
+static int __init acpi_cascade_irqdomain_init(void)
+{
+ int r;
+
+ r = acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC, liointc_parse_madt, 0);
+ if (r < 0)
+ return r;
+
+ r = acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, eiointc_parse_madt, 0);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ int ret;
+
+ if (irq_domain)
+ return 0;
+
+ /* Mask interrupts. */
+ clear_csr_ecfg(ECFG0_IM);
+ clear_csr_estat(ESTATF_IP);
+
+ cpuintc_handle = irq_domain_alloc_named_fwnode("CPUINTC");
+ irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
+ &loongarch_cpu_intc_irq_domain_ops, NULL);
+
+ if (!irq_domain)
+ panic("Failed to add irqdomain for LoongArch CPU");
+
+ set_handle_irq(&handle_cpu_irq);
+ acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id);
+ acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq);
+ ret = acpi_cascade_irqdomain_init();
+
+ return ret;
+}
+
+IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC,
+ NULL, ACPI_MADT_CORE_PIC_VERSION_V1, cpuintc_acpi_init);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
new file mode 100644
index 000000000..3d99b8bdd
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Loongson Extend I/O Interrupt Controller support
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) "eiointc: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#define EIOINTC_REG_NODEMAP 0x14a0
+#define EIOINTC_REG_IPMAP 0x14c0
+#define EIOINTC_REG_ENABLE 0x1600
+#define EIOINTC_REG_BOUNCE 0x1680
+#define EIOINTC_REG_ISR 0x1800
+#define EIOINTC_REG_ROUTE 0x1c00
+
+#define VEC_REG_COUNT 4
+#define VEC_COUNT_PER_REG 64
+#define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG)
+#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG)
+#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG)
+#define EIOINTC_ALL_ENABLE 0xffffffff
+
+#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE)
+
+static int nr_pics;
+
+struct eiointc_priv {
+ u32 node;
+ nodemask_t node_map;
+ cpumask_t cpuspan_map;
+ struct fwnode_handle *domain_handle;
+ struct irq_domain *eiointc_domain;
+};
+
+static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
+
+static void eiointc_enable(void)
+{
+ uint64_t misc;
+
+ misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
+ misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
+ iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
+}
+
+static int cpu_to_eio_node(int cpu)
+{
+ return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
+}
+
+static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
+{
+ int i, node, cpu_node, route_node;
+ unsigned char coremap;
+ uint32_t pos_off, data, data_byte, data_mask;
+
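+	/* Route registers are one byte per vector and are accessed in aligned 32-bit groups */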
+ pos_off = pos & ~3;
+ data_byte = pos & 3;
+ data_mask = ~BIT_MASK(data_byte) & 0xf;
+
+ /* Calculate node and coremap of target irq */
+ cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
+ coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);
+
+ for_each_online_cpu(i) {
+ node = cpu_to_eio_node(i);
+ if (!node_isset(node, *node_map))
+ continue;
+
+ /* EIO node 0 is in charge of inter-node interrupt dispatch */
+ route_node = (node == mnode) ? cpu_node : node;
+ data = ((coremap | (route_node << 4)) << (data_byte * 8));
+ csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
+ }
+}
+
+static DEFINE_RAW_SPINLOCK(affinity_lock);
+
+static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
+{
+ unsigned int cpu;
+ unsigned long flags;
+ uint32_t vector, regaddr;
+ struct cpumask intersect_affinity;
+ struct eiointc_priv *priv = d->domain->host_data;
+
+ raw_spin_lock_irqsave(&affinity_lock, flags);
+
+ cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
+ cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);
+
+ if (cpumask_empty(&intersect_affinity)) {
+ raw_spin_unlock_irqrestore(&affinity_lock, flags);
+ return -EINVAL;
+ }
+ cpu = cpumask_first(&intersect_affinity);
+
+ vector = d->hwirq;
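+	/* Each 32-bit enable register covers 32 vectors */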
+ regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
+
+ /* Mask target vector */
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+
+ /* Set route for target vector */
+ eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
+
+ /* Unmask target vector */
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ raw_spin_unlock_irqrestore(&affinity_lock, flags);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int eiointc_index(int node)
+{
+ int i;
+
+ for (i = 0; i < nr_pics; i++) {
+ if (node_isset(node, eiointc_priv[i]->node_map))
+ return i;
+ }
+
+ return -1;
+}
+
+static int eiointc_router_init(unsigned int cpu)
+{
+ int i, bit;
+ uint32_t data;
+ uint32_t node = cpu_to_eio_node(cpu);
+ int index = eiointc_index(node);
+
+ if (index < 0) {
+ pr_err("Error: invalid nodemap!\n");
+ return -1;
+ }
+
+ if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
+ eiointc_enable();
+
+ for (i = 0; i < VEC_COUNT / 32; i++) {
+ data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
+ iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
+ }
+
+ for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
+ bit = BIT(1 + index); /* Route to IP[1 + index] */
+ data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+ iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
+ }
+
+ for (i = 0; i < VEC_COUNT / 4; i++) {
+ /* Route to Node-0 Core-0 */
+ if (index == 0)
+ bit = BIT(cpu_logical_map(0));
+ else
+ bit = (eiointc_priv[index]->node << 4) | 1;
+
+ data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+ iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
+ }
+
+ for (i = 0; i < VEC_COUNT / 32; i++) {
+ data = 0xffffffff;
+ iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
+ iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
+ }
+ }
+
+ return 0;
+}
+
+static void eiointc_irq_dispatch(struct irq_desc *desc)
+{
+ int i;
+ u64 pending;
+ bool handled = false;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct eiointc_priv *priv = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < VEC_REG_COUNT; i++) {
+ pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
+ iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
+ while (pending) {
+ int bit = __ffs(pending);
+ int irq = bit + VEC_COUNT_PER_REG * i;
+
+ generic_handle_domain_irq(priv->eiointc_domain, irq);
+ pending &= ~BIT(bit);
+ handled = true;
+ }
+ }
+
+ if (!handled)
+ spurious_interrupt();
+
+ chained_irq_exit(chip, desc);
+}
+
+static void eiointc_ack_irq(struct irq_data *d)
+{
+}
+
+static void eiointc_mask_irq(struct irq_data *d)
+{
+}
+
+static void eiointc_unmask_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip eiointc_irq_chip = {
+ .name = "EIOINTC",
+ .irq_ack = eiointc_ack_irq,
+ .irq_mask = eiointc_mask_irq,
+ .irq_unmask = eiointc_unmask_irq,
+ .irq_set_affinity = eiointc_set_irq_affinity,
+};
+
+static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int ret;
+ unsigned int i, type;
+ unsigned long hwirq = 0;
+	struct eiointc_priv *priv = domain->host_data;
+
+ ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
+ priv, handle_edge_irq, NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops eiointc_domain_ops = {
+ .translate = irq_domain_translate_onecell,
+ .alloc = eiointc_domain_alloc,
+ .free = eiointc_domain_free,
+};
+
+static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
+{
+ int i;
+
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ if (node == vec_group[i].node) {
+ vec_group[i].parent = parent;
+ return;
+ }
+ }
+}
+
+static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
+{
+ int i;
+
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ if (node == vec_group[i].node)
+ return vec_group[i].parent;
+ }
+ return NULL;
+}
+
+static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
+ unsigned int node = (pchpic_entry->address >> 44) & 0xf;
+ struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);
+
+ if (parent)
+ return pch_pic_acpi_init(parent, pchpic_entry);
+
+ return 0;
+}
+
+static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct irq_domain *parent;
+ struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
+ int node;
+
+ if (cpu_has_flatmode)
+ node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
+ else
+ node = eiointc_priv[nr_pics - 1]->node;
+
+ parent = acpi_get_vec_parent(node, msi_group);
+
+ if (parent)
+ return pch_msi_acpi_init(parent, pchmsi_entry);
+
+ return 0;
+}
+
+static int __init acpi_cascade_irqdomain_init(void)
+{
+ int r;
+
+ r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
+ if (r < 0)
+ return r;
+
+ r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int __init eiointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_eio_pic *acpi_eiointc)
+{
+ int i, ret, parent_irq;
+ unsigned long node_map;
+ struct eiointc_priv *priv;
+ int node;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
+ acpi_eiointc->node);
+ if (!priv->domain_handle) {
+ pr_err("Unable to allocate domain handle\n");
+ goto out_free_priv;
+ }
+
+ priv->node = acpi_eiointc->node;
+ node_map = acpi_eiointc->node_map ? : -1ULL;
+
+ for_each_possible_cpu(i) {
+ if (node_map & (1ULL << cpu_to_eio_node(i))) {
+ node_set(cpu_to_eio_node(i), priv->node_map);
+ cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
+ }
+ }
+
+ /* Setup IRQ domain */
+ priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
+ &eiointc_domain_ops, priv);
+ if (!priv->eiointc_domain) {
+ pr_err("loongson-eiointc: cannot add IRQ domain\n");
+ goto out_free_handle;
+ }
+
+ eiointc_priv[nr_pics++] = priv;
+
+ eiointc_router_init(0);
+
+ parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
+ irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
+
+ if (nr_pics == 1) {
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ "irqchip/loongarch/intc:starting",
+ eiointc_router_init, NULL);
+ }
+
+ if (cpu_has_flatmode)
+ node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
+ else
+ node = acpi_eiointc->node;
+ acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
+ acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);
+ ret = acpi_cascade_irqdomain_init();
+
+ return ret;
+
+out_free_handle:
+ irq_domain_free_fwnode(priv->domain_handle);
+ priv->domain_handle = NULL;
+out_free_priv:
+ kfree(priv);
+
+ return -ENOMEM;
+}
diff --git a/drivers/irqchip/irq-loongson-htpic.c b/drivers/irqchip/irq-loongson-htpic.c
new file mode 100644
index 000000000..f4abdf156
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-htpic.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson HTPIC IRQ support
+ */
+
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/syscore_ops.h>
+
+#include <asm/i8259.h>
+
+#define HTPIC_MAX_PARENT_IRQ 4
+#define HTINT_NUM_VECTORS 8
+#define HTINT_EN_OFF 0x20
+
+struct loongson_htpic {
+ void __iomem *base;
+ struct irq_domain *domain;
+};
+
+static struct loongson_htpic *htpic;
+
+static void htpic_irq_dispatch(struct irq_desc *desc)
+{
+ struct loongson_htpic *priv = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ uint32_t pending;
+
+ chained_irq_enter(chip, desc);
+ pending = readl(priv->base);
+	/* Ack all IRQs at once, otherwise an IRQ flood can occur */
+ writel(pending, priv->base);
+
+ if (!pending)
+ spurious_interrupt();
+
+ while (pending) {
+ int bit = __ffs(pending);
+
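+		/* Only the 16 i8259 inputs are valid; anything above is spurious */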
+ if (unlikely(bit > 15)) {
+ spurious_interrupt();
+ break;
+ }
+
+ generic_handle_domain_irq(priv->domain, bit);
+ pending &= ~BIT(bit);
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static void htpic_reg_init(void)
+{
+ int i;
+
+ for (i = 0; i < HTINT_NUM_VECTORS; i++) {
+ /* Disable all HT Vectors */
+ writel(0x0, htpic->base + HTINT_EN_OFF + i * 0x4);
+ /* Read back to force write */
+ (void) readl(htpic->base + i * 0x4);
+ /* Ack all possible pending IRQs */
+ writel(GENMASK(31, 0), htpic->base + i * 0x4);
+ }
+
+ /* Enable 16 vectors for PIC */
+ writel(0xffff, htpic->base + HTINT_EN_OFF);
+}
+
+static void htpic_resume(void)
+{
+ htpic_reg_init();
+}
+
+struct syscore_ops htpic_syscore_ops = {
+ .resume = htpic_resume,
+};
+
+static int __init htpic_of_init(struct device_node *node, struct device_node *parent)
+{
+ unsigned int parent_irq[4];
+ int i, err;
+ int num_parents = 0;
+
+ if (htpic) {
+ pr_err("loongson-htpic: Only one HTPIC is allowed in the system\n");
+ return -ENODEV;
+ }
+
+ htpic = kzalloc(sizeof(*htpic), GFP_KERNEL);
+ if (!htpic)
+ return -ENOMEM;
+
+ htpic->base = of_iomap(node, 0);
+ if (!htpic->base) {
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ htpic->domain = __init_i8259_irqs(node);
+ if (!htpic->domain) {
+ pr_err("loongson-htpic: Failed to initialize i8259 IRQs\n");
+ err = -ENOMEM;
+ goto out_iounmap;
+ }
+
+	/* Interrupts may come from any of the 4 interrupt lines */
+ for (i = 0; i < HTPIC_MAX_PARENT_IRQ; i++) {
+ parent_irq[i] = irq_of_parse_and_map(node, i);
+ if (parent_irq[i] <= 0)
+ break;
+
+ num_parents++;
+ }
+
+ if (!num_parents) {
+ pr_err("loongson-htpic: Failed to get parent irqs\n");
+ err = -ENODEV;
+ goto out_remove_domain;
+ }
+
+ htpic_reg_init();
+
+ for (i = 0; i < num_parents; i++) {
+ irq_set_chained_handler_and_data(parent_irq[i],
+ htpic_irq_dispatch, htpic);
+ }
+
+ register_syscore_ops(&htpic_syscore_ops);
+
+ return 0;
+
+out_remove_domain:
+ irq_domain_remove(htpic->domain);
+out_iounmap:
+ iounmap(htpic->base);
+out_free:
+ kfree(htpic);
+ return err;
+}
+
+IRQCHIP_DECLARE(loongson_htpic, "loongson,htpic-1.0", htpic_of_init);
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
new file mode 100644
index 000000000..60a335d7e
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson HyperTransport Interrupt Vector support
+ */
+
+#define pr_fmt(fmt) "htvec: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/* Registers */
+#define HTVEC_EN_OFF 0x20
+#define HTVEC_MAX_PARENT_IRQ 8
+
+#define VEC_COUNT_PER_REG 32
+#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG)
+#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG)
+
+struct htvec {
+ int num_parents;
+ void __iomem *base;
+ struct irq_domain *htvec_domain;
+ raw_spinlock_t htvec_lock;
+};
+
+static void htvec_irq_dispatch(struct irq_desc *desc)
+{
+ int i;
+ u32 pending;
+ bool handled = false;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct htvec *priv = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < priv->num_parents; i++) {
+ pending = readl(priv->base + 4 * i);
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_domain_irq(priv->htvec_domain,
+ bit + VEC_COUNT_PER_REG * i);
+ pending &= ~BIT(bit);
+ handled = true;
+ }
+ }
+
+ if (!handled)
+ spurious_interrupt();
+
+ chained_irq_exit(chip, desc);
+}
+
+static void htvec_ack_irq(struct irq_data *d)
+{
+ struct htvec *priv = irq_data_get_irq_chip_data(d);
+
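+	/* Write 1 to the vector's status bit to clear its pending state */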
+ writel(BIT(VEC_REG_BIT(d->hwirq)),
+ priv->base + VEC_REG_IDX(d->hwirq) * 4);
+}
+
+static void htvec_mask_irq(struct irq_data *d)
+{
+ u32 reg;
+ void __iomem *addr;
+ struct htvec *priv = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&priv->htvec_lock);
+ addr = priv->base + HTVEC_EN_OFF;
+ addr += VEC_REG_IDX(d->hwirq) * 4;
+ reg = readl(addr);
+ reg &= ~BIT(VEC_REG_BIT(d->hwirq));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->htvec_lock);
+}
+
+static void htvec_unmask_irq(struct irq_data *d)
+{
+ u32 reg;
+ void __iomem *addr;
+ struct htvec *priv = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&priv->htvec_lock);
+ addr = priv->base + HTVEC_EN_OFF;
+ addr += VEC_REG_IDX(d->hwirq) * 4;
+ reg = readl(addr);
+ reg |= BIT(VEC_REG_BIT(d->hwirq));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->htvec_lock);
+}
+
+static struct irq_chip htvec_irq_chip = {
+ .name = "LOONGSON_HTVEC",
+ .irq_mask = htvec_mask_irq,
+ .irq_unmask = htvec_unmask_irq,
+ .irq_ack = htvec_ack_irq,
+};
+
+static int htvec_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int ret;
+ unsigned long hwirq;
+ unsigned int type, i;
+ struct htvec *priv = domain->host_data;
+
+ ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i, &htvec_irq_chip,
+ priv, handle_edge_irq, NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void htvec_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops htvec_domain_ops = {
+ .translate = irq_domain_translate_onecell,
+ .alloc = htvec_domain_alloc,
+ .free = htvec_domain_free,
+};
+
+static void htvec_reset(struct htvec *priv)
+{
+ u32 idx;
+
+ /* Clear IRQ cause registers, mask all interrupts */
+ for (idx = 0; idx < priv->num_parents; idx++) {
+ writel_relaxed(0x0, priv->base + HTVEC_EN_OFF + 4 * idx);
+ writel_relaxed(0xFFFFFFFF, priv->base + 4 * idx);
+ }
+}
+
+static int htvec_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct htvec *priv;
+ int err, parent_irq[8], i;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&priv->htvec_lock);
+ priv->base = of_iomap(node, 0);
+ if (!priv->base) {
+ err = -ENOMEM;
+ goto free_priv;
+ }
+
+	/* Interrupts may come from any of the 8 interrupt lines */
+ for (i = 0; i < HTVEC_MAX_PARENT_IRQ; i++) {
+ parent_irq[i] = irq_of_parse_and_map(node, i);
+ if (parent_irq[i] <= 0)
+ break;
+
+ priv->num_parents++;
+ }
+
+ if (!priv->num_parents) {
+ pr_err("Failed to get parent irqs\n");
+ err = -ENODEV;
+ goto iounmap_base;
+ }
+
+ priv->htvec_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ (VEC_COUNT_PER_REG * priv->num_parents),
+ &htvec_domain_ops, priv);
+ if (!priv->htvec_domain) {
+ pr_err("Failed to create IRQ domain\n");
+ err = -ENOMEM;
+ goto irq_dispose;
+ }
+
+ htvec_reset(priv);
+
+ for (i = 0; i < priv->num_parents; i++)
+ irq_set_chained_handler_and_data(parent_irq[i],
+ htvec_irq_dispatch, priv);
+
+ return 0;
+
+irq_dispose:
+ for (; i > 0; i--)
+ irq_dispose_mapping(parent_irq[i - 1]);
+iounmap_base:
+ iounmap(priv->base);
+free_priv:
+ kfree(priv);
+
+ return err;
+}
+
+IRQCHIP_DECLARE(htvec, "loongson,htvec-1.0", htvec_of_init);
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
new file mode 100644
index 000000000..c4584e2f0
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson Local IO Interrupt Controller support
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/irqchip/chained_irq.h>
+
+#ifdef CONFIG_MIPS
+#include <loongson.h>
+#else
+#include <asm/loongson.h>
+#endif
+
+#define LIOINTC_CHIP_IRQ 32
+#define LIOINTC_NUM_PARENT 4
+#define LIOINTC_NUM_CORES 4
+
+#define LIOINTC_INTC_CHIP_START 0x20
+
+#define LIOINTC_REG_INTC_STATUS (LIOINTC_INTC_CHIP_START + 0x20)
+#define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04)
+#define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08)
+#define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c)
+#define LIOINTC_REG_INTC_POL (LIOINTC_INTC_CHIP_START + 0x10)
+#define LIOINTC_REG_INTC_EDGE (LIOINTC_INTC_CHIP_START + 0x14)
+
+#define LIOINTC_SHIFT_INTx 4
+
+#define LIOINTC_ERRATA_IRQ 10
+
+#if defined(CONFIG_MIPS)
+#define liointc_core_id get_ebase_cpunum()
+#else
+#define liointc_core_id get_csr_cpuid()
+#endif
+
+struct liointc_handler_data {
+ struct liointc_priv *priv;
+ u32 parent_int_map;
+};
+
+struct liointc_priv {
+ struct irq_chip_generic *gc;
+ struct liointc_handler_data handler[LIOINTC_NUM_PARENT];
+ void __iomem *core_isr[LIOINTC_NUM_CORES];
+ u8 map_cache[LIOINTC_CHIP_IRQ];
+ bool has_lpc_irq_errata;
+};
+
+struct fwnode_handle *liointc_handle;
+
+static void liointc_chained_handle_irq(struct irq_desc *desc)
+{
+ struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_chip_generic *gc = handler->priv->gc;
+ int core = liointc_core_id % LIOINTC_NUM_CORES;
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+
+ pending = readl(handler->priv->core_isr[core]);
+
+ if (!pending) {
+ /* Always blame LPC IRQ if we have that bug */
+ if (handler->priv->has_lpc_irq_errata &&
+ (handler->parent_int_map & gc->mask_cache &
+ BIT(LIOINTC_ERRATA_IRQ)))
+ pending = BIT(LIOINTC_ERRATA_IRQ);
+ else
+ spurious_interrupt();
+ }
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_domain_irq(gc->domain, bit);
+ pending &= ~BIT(bit);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void liointc_set_bit(struct irq_chip_generic *gc,
+ unsigned int offset,
+ u32 mask, bool set)
+{
+ if (set)
+ writel(readl(gc->reg_base + offset) | mask,
+ gc->reg_base + offset);
+ else
+ writel(readl(gc->reg_base + offset) & ~mask,
+ gc->reg_base + offset);
+}
+
+static int liointc_set_type(struct irq_data *data, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ u32 mask = data->mask;
+ unsigned long flags;
+
+ irq_gc_lock_irqsave(gc, flags);
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
+ break;
+ default:
+ irq_gc_unlock_irqrestore(gc, flags);
+ return -EINVAL;
+ }
+ irq_gc_unlock_irqrestore(gc, flags);
+
+ irqd_set_trigger_type(data, type);
+ return 0;
+}
+
+static void liointc_resume(struct irq_chip_generic *gc)
+{
+ struct liointc_priv *priv = gc->private;
+ unsigned long flags;
+ int i;
+
+ irq_gc_lock_irqsave(gc, flags);
+ /* Disable all at first */
+ writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE);
+ /* Restore map cache */
+ for (i = 0; i < LIOINTC_CHIP_IRQ; i++)
+ writeb(priv->map_cache[i], gc->reg_base + i);
+ /* Restore mask cache */
+ writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
+ irq_gc_unlock_irqrestore(gc, flags);
+}
+
+static int parent_irq[LIOINTC_NUM_PARENT];
+static u32 parent_int_map[LIOINTC_NUM_PARENT];
+static const char *const parent_names[] = {"int0", "int1", "int2", "int3"};
+static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"};
+
+static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+ *out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ;
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+static const struct irq_domain_ops acpi_irq_gc_ops = {
+ .map = irq_map_generic_chip,
+ .unmap = irq_unmap_generic_chip,
+ .xlate = liointc_domain_xlate,
+};
+
+static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
+ struct fwnode_handle *domain_handle, struct device_node *node)
+{
+ int i, err;
+ void __iomem *base;
+ struct irq_chip_type *ct;
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+ struct liointc_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = ioremap(addr, size);
+ if (!base)
+ goto out_free_priv;
+
+ for (i = 0; i < LIOINTC_NUM_CORES; i++)
+ priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
+
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++)
+ priv->handler[i].parent_int_map = parent_int_map[i];
+
+ if (revision > 1) {
+ for (i = 0; i < LIOINTC_NUM_CORES; i++) {
+ int index = of_property_match_string(node,
+ "reg-names", core_reg_names[i]);
+
+ if (index < 0)
+ continue;
+
+ priv->core_isr[i] = of_iomap(node, index);
+ }
+
+ if (!priv->core_isr[0])
+ goto out_iounmap;
+ }
+
+ /* Setup IRQ domain */
+ if (!acpi_disabled)
+ domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
+ &acpi_irq_gc_ops, priv);
+ else
+ domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
+ &irq_generic_chip_ops, priv);
+ if (!domain) {
+ pr_err("loongson-liointc: cannot add IRQ domain\n");
+ goto out_iounmap;
+ }
+
+ err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1,
+ (node ? node->full_name : "LIOINTC"),
+ handle_level_irq, 0, IRQ_NOPROBE, 0);
+ if (err) {
+ pr_err("loongson-liointc: unable to register IRQ domain\n");
+ goto out_free_domain;
+ }
+
+ /* Disable all IRQs */
+ writel(0xffffffff, base + LIOINTC_REG_INTC_DISABLE);
+ /* Set to level triggered */
+ writel(0x0, base + LIOINTC_REG_INTC_EDGE);
+
+ /* Generate parent INT part of map cache */
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
+ u32 pending = priv->handler[i].parent_int_map;
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ priv->map_cache[bit] = BIT(i) << LIOINTC_SHIFT_INTx;
+ pending &= ~BIT(bit);
+ }
+ }
+
+ for (i = 0; i < LIOINTC_CHIP_IRQ; i++) {
+ /* Generate core part of map cache */
+ priv->map_cache[i] |= BIT(loongson_sysconf.boot_cpu_id);
+ writeb(priv->map_cache[i], base + i);
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->private = priv;
+ gc->reg_base = base;
+ gc->domain = domain;
+ gc->resume = liointc_resume;
+
+ ct = gc->chip_types;
+ ct->regs.enable = LIOINTC_REG_INTC_ENABLE;
+ ct->regs.disable = LIOINTC_REG_INTC_DISABLE;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
+ ct->chip.irq_set_type = liointc_set_type;
+
+ gc->mask_cache = 0;
+ priv->gc = gc;
+
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
+ if (parent_irq[i] <= 0)
+ continue;
+
+ priv->handler[i].priv = priv;
+ irq_set_chained_handler_and_data(parent_irq[i],
+ liointc_chained_handle_irq, &priv->handler[i]);
+ }
+
+ liointc_handle = domain_handle;
+ return 0;
+
+out_free_domain:
+ irq_domain_remove(domain);
+out_iounmap:
+ iounmap(base);
+out_free_priv:
+ kfree(priv);
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_OF
+
+static int __init liointc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+	bool have_parent = false;
+ int sz, i, index, revision, err = 0;
+ struct resource res;
+
+ if (!of_device_is_compatible(node, "loongson,liointc-2.0")) {
+ index = 0;
+ revision = 1;
+ } else {
+ index = of_property_match_string(node, "reg-names", "main");
+ revision = 2;
+ }
+
+ if (of_address_to_resource(node, index, &res))
+ return -EINVAL;
+
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
+ parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
+ if (parent_irq[i] > 0)
+			have_parent = true;
+ }
+ if (!have_parent)
+ return -ENODEV;
+
+ sz = of_property_read_variable_u32_array(node,
+ "loongson,parent_int_map",
+ &parent_int_map[0],
+ LIOINTC_NUM_PARENT,
+ LIOINTC_NUM_PARENT);
+ if (sz < 4) {
+ pr_err("loongson-liointc: No parent_int_map\n");
+ return -ENODEV;
+ }
+
+ err = liointc_init(res.start, resource_size(&res),
+ revision, of_node_to_fwnode(node), node);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init);
+IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init);
+IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init);
+
+#endif
+
+#ifdef CONFIG_ACPI
+int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc)
+{
+ int ret;
+ struct fwnode_handle *domain_handle;
+
+ parent_int_map[0] = acpi_liointc->cascade_map[0];
+ parent_int_map[1] = acpi_liointc->cascade_map[1];
+
+ parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
+ parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
+
+ domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
+ if (!domain_handle) {
+ pr_err("Unable to allocate domain handle\n");
+ return -ENOMEM;
+ }
+ ret = liointc_init(acpi_liointc->address, acpi_liointc->size,
+ 1, domain_handle, NULL);
+ if (ret)
+ irq_domain_free_fwnode(domain_handle);
+
+ return ret;
+}
+#endif
diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c
new file mode 100644
index 000000000..bf2324910
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-pch-lpc.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Loongson LPC Interrupt Controller support
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) "lpc: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+
+/* Registers */
+#define LPC_INT_CTL 0x00
+#define LPC_INT_ENA 0x04
+#define LPC_INT_STS 0x08
+#define LPC_INT_CLR 0x0c
+#define LPC_INT_POL 0x10
+#define LPC_COUNT 16
+
+/* LPC_INT_CTL */
+#define LPC_INT_CTL_EN BIT(31)
+
+struct pch_lpc {
+ void __iomem *base;
+ struct irq_domain *lpc_domain;
+ raw_spinlock_t lpc_lock;
+ u32 saved_reg_ctl;
+ u32 saved_reg_ena;
+ u32 saved_reg_pol;
+};
+
+struct fwnode_handle *pch_lpc_handle;
+
+static void lpc_irq_ack(struct irq_data *d)
+{
+ unsigned long flags;
+ struct pch_lpc *priv = d->domain->host_data;
+
+ raw_spin_lock_irqsave(&priv->lpc_lock, flags);
+ writel(0x1 << d->hwirq, priv->base + LPC_INT_CLR);
+ raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
+}
+
+static void lpc_irq_mask(struct irq_data *d)
+{
+ unsigned long flags;
+ struct pch_lpc *priv = d->domain->host_data;
+
+ raw_spin_lock_irqsave(&priv->lpc_lock, flags);
+ writel(readl(priv->base + LPC_INT_ENA) & (~(0x1 << (d->hwirq))),
+ priv->base + LPC_INT_ENA);
+ raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
+}
+
+static void lpc_irq_unmask(struct irq_data *d)
+{
+ unsigned long flags;
+ struct pch_lpc *priv = d->domain->host_data;
+
+ raw_spin_lock_irqsave(&priv->lpc_lock, flags);
+ writel(readl(priv->base + LPC_INT_ENA) | (0x1 << (d->hwirq)),
+ priv->base + LPC_INT_ENA);
+ raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
+}
+
+static int lpc_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ u32 val;
+ u32 mask = 0x1 << (d->hwirq);
+ struct pch_lpc *priv = d->domain->host_data;
+
+ if (!(type & IRQ_TYPE_LEVEL_MASK))
+ return 0;
+
+ val = readl(priv->base + LPC_INT_POL);
+
+ if (type == IRQ_TYPE_LEVEL_HIGH)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ writel(val, priv->base + LPC_INT_POL);
+
+ return 0;
+}
+
+static const struct irq_chip pch_lpc_irq_chip = {
+ .name = "PCH LPC",
+ .irq_mask = lpc_irq_mask,
+ .irq_unmask = lpc_irq_unmask,
+ .irq_ack = lpc_irq_ack,
+ .irq_set_type = lpc_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static void lpc_irq_dispatch(struct irq_desc *desc)
+{
+ u32 pending, bit;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct pch_lpc *priv = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+
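+	/* Only handle sources that are both enabled and currently asserted */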
+ pending = readl(priv->base + LPC_INT_ENA);
+ pending &= readl(priv->base + LPC_INT_STS);
+ if (!pending)
+ spurious_interrupt();
+
+ while (pending) {
+ bit = __ffs(pending);
+
+ generic_handle_domain_irq(priv->lpc_domain, bit);
+ pending &= ~BIT(bit);
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static int pch_lpc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &pch_lpc_irq_chip, handle_level_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops pch_lpc_domain_ops = {
+ .map = pch_lpc_map,
+ .translate = irq_domain_translate_twocell,
+};
+
+static void pch_lpc_reset(struct pch_lpc *priv)
+{
+	/* Enable the LPC interrupt (bit 31: enable, bit 30: edge trigger) */
+ writel(LPC_INT_CTL_EN, priv->base + LPC_INT_CTL);
+ writel(0, priv->base + LPC_INT_ENA);
+	/* Clear all 18 interrupt bits */
+ writel(GENMASK(17, 0), priv->base + LPC_INT_CLR);
+}
+
+static int pch_lpc_disabled(struct pch_lpc *priv)
+{
+ return (readl(priv->base + LPC_INT_ENA) == 0xffffffff) &&
+ (readl(priv->base + LPC_INT_STS) == 0xffffffff);
+}
+
+int __init pch_lpc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_lpc_pic *acpi_pchlpc)
+{
+ int parent_irq;
+ struct pch_lpc *priv;
+ struct irq_fwspec fwspec;
+ struct fwnode_handle *irq_handle;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&priv->lpc_lock);
+
+ priv->base = ioremap(acpi_pchlpc->address, acpi_pchlpc->size);
+ if (!priv->base)
+ goto free_priv;
+
+ if (pch_lpc_disabled(priv)) {
+ pr_err("Failed to get LPC status\n");
+ goto iounmap_base;
+ }
+
+ irq_handle = irq_domain_alloc_named_fwnode("lpcintc");
+ if (!irq_handle) {
+ pr_err("Unable to allocate domain handle\n");
+ goto iounmap_base;
+ }
+
+ priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT,
+ &pch_lpc_domain_ops, priv);
+ if (!priv->lpc_domain) {
+ pr_err("Failed to create IRQ domain\n");
+ goto free_irq_handle;
+ }
+ pch_lpc_reset(priv);
+
+ fwspec.fwnode = parent->fwnode;
+ fwspec.param[0] = acpi_pchlpc->cascade + GSI_MIN_PCH_IRQ;
+ fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ fwspec.param_count = 2;
+ parent_irq = irq_create_fwspec_mapping(&fwspec);
+ irq_set_chained_handler_and_data(parent_irq, lpc_irq_dispatch, priv);
+
+ pch_lpc_handle = irq_handle;
+ return 0;
+
+free_irq_handle:
+ irq_domain_free_fwnode(irq_handle);
+iounmap_base:
+ iounmap(priv->base);
+free_priv:
+ kfree(priv);
+
+ return -ENOMEM;
+}
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
new file mode 100644
index 000000000..a72ede90f
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson PCH MSI support
+ */
+
+#define pr_fmt(fmt) "pch-msi: " fmt
+
+#include <linux/irqchip.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+static int nr_pics;
+
+struct pch_msi_data {
+ struct mutex msi_map_lock;
+ phys_addr_t doorbell;
+	u32 irq_first; /* The vector number where MSIs start */
+ u32 num_irqs; /* The number of vectors for MSIs */
+ unsigned long *msi_map;
+};
+
+static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS];
+
+static void pch_msi_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void pch_msi_unmask_msi_irq(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ pci_msi_unmask_irq(d);
+}
+
+static struct irq_chip pch_msi_irq_chip = {
+ .name = "PCH PCI MSI",
+ .irq_mask = pch_msi_mask_msi_irq,
+ .irq_unmask = pch_msi_unmask_msi_irq,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int pch_msi_allocate_hwirq(struct pch_msi_data *priv, int num_req)
+{
+ int first;
+
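+	/* Multi-MSI blocks must be power-of-two sized and aligned, hence get_count_order() */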
+ mutex_lock(&priv->msi_map_lock);
+
+ first = bitmap_find_free_region(priv->msi_map, priv->num_irqs,
+ get_count_order(num_req));
+ if (first < 0) {
+ mutex_unlock(&priv->msi_map_lock);
+ return -ENOSPC;
+ }
+
+ mutex_unlock(&priv->msi_map_lock);
+
+ return priv->irq_first + first;
+}
+
+static void pch_msi_free_hwirq(struct pch_msi_data *priv,
+ int hwirq, int num_req)
+{
+ int first = hwirq - priv->irq_first;
+
+ mutex_lock(&priv->msi_map_lock);
+ bitmap_release_region(priv->msi_map, first, get_count_order(num_req));
+ mutex_unlock(&priv->msi_map_lock);
+}
+
+static void pch_msi_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct pch_msi_data *priv = irq_data_get_irq_chip_data(data);
+
+ msg->address_hi = upper_32_bits(priv->doorbell);
+ msg->address_lo = lower_32_bits(priv->doorbell);
+ msg->data = data->hwirq;
+}
+
+static struct msi_domain_info pch_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ .chip = &pch_msi_irq_chip,
+};
+
+static struct irq_chip middle_irq_chip = {
+ .name = "PCH MSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = pch_msi_compose_msi_msg,
+};
+
+static int pch_msi_parent_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, int hwirq)
+{
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+}
+
+static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct pch_msi_data *priv = domain->host_data;
+ int hwirq, err, i;
+
+ hwirq = pch_msi_allocate_hwirq(priv, nr_irqs);
+ if (hwirq < 0)
+ return hwirq;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = pch_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto err_hwirq;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &middle_irq_chip, priv);
+ }
+
+ return 0;
+
+err_hwirq:
+ pch_msi_free_hwirq(priv, hwirq, nr_irqs);
+ irq_domain_free_irqs_parent(domain, virq, i - 1);
+
+ return err;
+}
+
+static void pch_msi_middle_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct pch_msi_data *priv = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ pch_msi_free_hwirq(priv, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops pch_msi_middle_domain_ops = {
+ .alloc = pch_msi_middle_domain_alloc,
+ .free = pch_msi_middle_domain_free,
+};
+
+static int pch_msi_init_domains(struct pch_msi_data *priv,
+ struct irq_domain *parent,
+ struct fwnode_handle *domain_handle)
+{
+ struct irq_domain *middle_domain, *msi_domain;
+
+ middle_domain = irq_domain_create_linear(domain_handle,
+ priv->num_irqs,
+ &pch_msi_middle_domain_ops,
+ priv);
+ if (!middle_domain) {
+ pr_err("Failed to create the MSI middle domain\n");
+ return -ENOMEM;
+ }
+
+ middle_domain->parent = parent;
+ irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
+
+ msi_domain = pci_msi_create_irq_domain(domain_handle,
+ &pch_msi_domain_info,
+ middle_domain);
+ if (!msi_domain) {
+ pr_err("Failed to create PCI MSI domain\n");
+ irq_domain_remove(middle_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int pch_msi_init(phys_addr_t msg_address, int irq_base, int irq_count,
+ struct irq_domain *parent_domain, struct fwnode_handle *domain_handle)
+{
+ int ret;
+ struct pch_msi_data *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->msi_map_lock);
+
+ priv->doorbell = msg_address;
+ priv->irq_first = irq_base;
+ priv->num_irqs = irq_count;
+
+ priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL);
+ if (!priv->msi_map)
+ goto err_priv;
+
+ pr_debug("Registering %d MSIs, starting at %d\n",
+ priv->num_irqs, priv->irq_first);
+
+ ret = pch_msi_init_domains(priv, parent_domain, domain_handle);
+ if (ret)
+ goto err_map;
+
+ pch_msi_handle[nr_pics++] = domain_handle;
+ return 0;
+
+err_map:
+ bitmap_free(priv->msi_map);
+err_priv:
+ kfree(priv);
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_OF
+static int pch_msi_of_init(struct device_node *node, struct device_node *parent)
+{
+ int err;
+ int irq_base, irq_count;
+ struct resource res;
+ struct irq_domain *parent_domain;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Failed to find the parent domain\n");
+ return -ENXIO;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("Failed to allocate resource\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "loongson,msi-base-vec", &irq_base)) {
+ pr_err("Unable to parse MSI vec base\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "loongson,msi-num-vecs", &irq_count)) {
+ pr_err("Unable to parse MSI vec number\n");
+ return -EINVAL;
+ }
+
+ err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_node_to_fwnode(node));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init);
+#endif
+
+#ifdef CONFIG_ACPI
+struct fwnode_handle *get_pch_msi_handle(int pci_segment)
+{
+ int i;
+
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ if (msi_group[i].pci_segment == pci_segment)
+ return pch_msi_handle[i];
+ }
+ return NULL;
+}
+
+int __init pch_msi_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_msi_pic *acpi_pchmsi)
+{
+ int ret;
+ struct fwnode_handle *domain_handle;
+
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchmsi->msg_address);
+ ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
+ acpi_pchmsi->count, parent, domain_handle);
+ if (ret < 0)
+ irq_domain_free_fwnode(domain_handle);
+
+ return ret;
+}
+#endif
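
A note on the hwirq allocator above: pch_msi_allocate_hwirq() uses bitmap_find_free_region() with get_count_order(), so a multi-MSI request is rounded up to a power-of-two sized, naturally aligned block of vectors, as PCI multi-MSI requires. Below is a minimal user-space sketch of that rounding, assuming only the C standard library; count_order() is a stand-in for the kernel's get_count_order() and the request sizes are invented:

#include <stdio.h>

/* Stand-in for get_count_order(): smallest order with (1 << order) >= n. */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int num_req;

	/* A request for e.g. 3 vectors reserves an aligned block of 4. */
	for (num_req = 1; num_req <= 8; num_req++)
		printf("num_req=%u -> block of %d hwirqs\n",
		       num_req, 1 << count_order(num_req));
	return 0;
}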
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
new file mode 100644
index 000000000..583939509
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson PCH PIC support
+ */
+
+#define pr_fmt(fmt) "pch-pic: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/* Registers */
+#define PCH_PIC_MASK 0x20
+#define PCH_PIC_HTMSI_EN 0x40
+#define PCH_PIC_EDGE 0x60
+#define PCH_PIC_CLR 0x80
+#define PCH_PIC_AUTO0 0xc0
+#define PCH_PIC_AUTO1 0xe0
+#define PCH_INT_ROUTE(irq) (0x100 + irq)
+#define PCH_INT_HTVEC(irq) (0x200 + irq)
+#define PCH_PIC_POL 0x3e0
+
+#define PIC_COUNT_PER_REG 32
+#define PIC_REG_COUNT 2
+#define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT)
+#define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG)
+#define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG)
+
+static int nr_pics;
+
+struct pch_pic {
+ void __iomem *base;
+ struct irq_domain *pic_domain;
+ u32 ht_vec_base;
+ raw_spinlock_t pic_lock;
+ u32 vec_count;
+ u32 gsi_base;
+};
+
+static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
+
+struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
+
+static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
+{
+ u32 reg;
+ void __iomem *addr = priv->base + offset + PIC_REG_IDX(bit) * 4;
+
+ raw_spin_lock(&priv->pic_lock);
+ reg = readl(addr);
+ reg |= BIT(PIC_REG_BIT(bit));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->pic_lock);
+}
+
+static void pch_pic_bitclr(struct pch_pic *priv, int offset, int bit)
+{
+ u32 reg;
+ void __iomem *addr = priv->base + offset + PIC_REG_IDX(bit) * 4;
+
+ raw_spin_lock(&priv->pic_lock);
+ reg = readl(addr);
+ reg &= ~BIT(PIC_REG_BIT(bit));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->pic_lock);
+}
+
+static void pch_pic_mask_irq(struct irq_data *d)
+{
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+ pch_pic_bitset(priv, PCH_PIC_MASK, d->hwirq);
+ irq_chip_mask_parent(d);
+}
+
+static void pch_pic_unmask_irq(struct irq_data *d)
+{
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(PIC_REG_BIT(d->hwirq)),
+ priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+
+ irq_chip_unmask_parent(d);
+ pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq);
+}
+
+static int pch_pic_set_type(struct irq_data *d, unsigned int type)
+{
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int ret = 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ irq_set_handler_locked(d, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ irq_set_handler_locked(d, handle_edge_irq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ irq_set_handler_locked(d, handle_level_irq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ irq_set_handler_locked(d, handle_level_irq);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static void pch_pic_ack_irq(struct irq_data *d)
+{
+ unsigned int reg;
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+ reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4);
+ if (reg & BIT(PIC_REG_BIT(d->hwirq))) {
+ writel(BIT(PIC_REG_BIT(d->hwirq)),
+ priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+ }
+ irq_chip_ack_parent(d);
+}
+
+static struct irq_chip pch_pic_irq_chip = {
+ .name = "PCH PIC",
+ .irq_mask = pch_pic_mask_irq,
+ .irq_unmask = pch_pic_unmask_irq,
+ .irq_ack = pch_pic_ack_irq,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = pch_pic_set_type,
+};
+
+static int pch_pic_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct pch_pic *priv = d->host_data;
+ struct device_node *of_node = to_of_node(fwspec->fwnode);
+
+ if (fwspec->param_count < 1)
+ return -EINVAL;
+
+ if (of_node) {
+ if (fwspec->param_count < 2)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ } else {
+ *hwirq = fwspec->param[0] - priv->gsi_base;
+ *type = IRQ_TYPE_NONE;
+ }
+
+ return 0;
+}
+
+static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int err;
+ unsigned int type;
+ unsigned long hwirq;
+ struct irq_fwspec *fwspec = arg;
+ struct irq_fwspec parent_fwspec;
+ struct pch_pic *priv = domain->host_data;
+
+ err = pch_pic_domain_translate(domain, fwspec, &hwirq, &type);
+ if (err)
+ return err;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 1;
+ parent_fwspec.param[0] = hwirq + priv->ht_vec_base;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &pch_pic_irq_chip, priv,
+ handle_level_irq, NULL, NULL);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops pch_pic_domain_ops = {
+ .translate = pch_pic_domain_translate,
+ .alloc = pch_pic_alloc,
+ .free = irq_domain_free_irqs_parent,
+};
+
+static void pch_pic_reset(struct pch_pic *priv)
+{
+ int i;
+
+ for (i = 0; i < PIC_COUNT; i++) {
+ /* Write vector ID */
+ writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
+ /* Hardcode route to HT0 Lo */
+ writeb(1, priv->base + PCH_INT_ROUTE(i));
+ }
+
+ for (i = 0; i < PIC_REG_COUNT; i++) {
+ /* Clear IRQ cause registers, mask all interrupts */
+ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_MASK + 4 * i);
+ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_CLR + 4 * i);
+ /* Clear auto bounce, we don't need that */
+ writel_relaxed(0, priv->base + PCH_PIC_AUTO0 + 4 * i);
+ writel_relaxed(0, priv->base + PCH_PIC_AUTO1 + 4 * i);
+ /* Enable HTMSI transformer */
+ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_HTMSI_EN + 4 * i);
+ }
+}
+
+static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
+ struct irq_domain *parent_domain, struct fwnode_handle *domain_handle,
+ u32 gsi_base)
+{
+ struct pch_pic *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&priv->pic_lock);
+ priv->base = ioremap(addr, size);
+ if (!priv->base)
+ goto free_priv;
+
+ priv->ht_vec_base = vec_base;
+ priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1;
+ priv->gsi_base = gsi_base;
+
+ priv->pic_domain = irq_domain_create_hierarchy(parent_domain, 0,
+ priv->vec_count, domain_handle,
+ &pch_pic_domain_ops, priv);
+
+ if (!priv->pic_domain) {
+ pr_err("Failed to create IRQ domain\n");
+ goto iounmap_base;
+ }
+
+ pch_pic_reset(priv);
+ pch_pic_handle[nr_pics] = domain_handle;
+ pch_pic_priv[nr_pics++] = priv;
+
+ return 0;
+
+iounmap_base:
+ iounmap(priv->base);
+free_priv:
+ kfree(priv);
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_OF
+
+static int pch_pic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int err, vec_base;
+ struct resource res;
+ struct irq_domain *parent_domain;
+
+ if (of_address_to_resource(node, 0, &res))
+ return -EINVAL;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Failed to find the parent domain\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(node, "loongson,pic-base-vec", &vec_base)) {
+ pr_err("Failed to determine pic-base-vec\n");
+ return -EINVAL;
+ }
+
+ err = pch_pic_init(res.start, resource_size(&res), vec_base,
+ parent_domain, of_node_to_fwnode(node), 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
+
+#endif
+
+#ifdef CONFIG_ACPI
+int find_pch_pic(u32 gsi)
+{
+ int i;
+
+ /* Find the PCH_PIC that manages this GSI. */
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ struct pch_pic *priv = pch_pic_priv[i];
+
+ if (!priv)
+ return -1;
+
+ if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
+ return i;
+ }
+
+ pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
+ return -1;
+}
+
+static int __init pch_lpc_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_lpc_pic *pchlpc_entry = (struct acpi_madt_lpc_pic *)header;
+
+ return pch_lpc_acpi_init(pch_pic_priv[0]->pic_domain, pchlpc_entry);
+}
+
+static int __init acpi_cascade_irqdomain_init(void)
+{
+ int r;
+
+ r = acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC, pch_lpc_parse_madt, 0);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int __init pch_pic_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_bio_pic *acpi_pchpic)
+{
+ int ret;
+ struct fwnode_handle *domain_handle;
+
+ if (find_pch_pic(acpi_pchpic->gsi_base) >= 0)
+ return 0;
+
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
+ if (!domain_handle) {
+ pr_err("Unable to allocate domain handle\n");
+ return -ENOMEM;
+ }
+
+ ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size,
+ 0, parent, domain_handle, acpi_pchpic->gsi_base);
+
+ if (ret < 0) {
+ irq_domain_free_fwnode(domain_handle);
+ return ret;
+ }
+
+ if (acpi_pchpic->id == 0)
+ ret = acpi_cascade_irqdomain_init();
+
+ return ret;
+}
+#endif
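
For the register arithmetic above: the mask/clear/edge/polarity banks are arrays of 32-bit registers with one bit per hwirq, so PIC_REG_IDX() selects the register and PIC_REG_BIT() the bit within it. A standalone sketch of that split (the hwirq value is arbitrary):

#include <stdio.h>

#define PIC_COUNT_PER_REG	32
#define PIC_REG_IDX(irq_id)	((irq_id) / PIC_COUNT_PER_REG)
#define PIC_REG_BIT(irq_id)	((irq_id) % PIC_COUNT_PER_REG)

int main(void)
{
	unsigned int hwirq = 45;	/* arbitrary example hwirq */

	/* hwirq 45 lands in the second register (offset +4), bit 13 */
	printf("hwirq %u -> register offset +%u, bit %u\n",
	       hwirq, PIC_REG_IDX(hwirq) * 4, PIC_REG_BIT(hwirq));
	return 0;
}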
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
new file mode 100644
index 000000000..4d70a8571
--- /dev/null
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <asm/exception.h>
+
+#define LPC32XX_INTC_MASK 0x00
+#define LPC32XX_INTC_RAW 0x04
+#define LPC32XX_INTC_STAT 0x08
+#define LPC32XX_INTC_POL 0x0C
+#define LPC32XX_INTC_TYPE 0x10
+#define LPC32XX_INTC_FIQ 0x14
+
+#define NR_LPC32XX_IC_IRQS 32
+
+struct lpc32xx_irq_chip {
+ void __iomem *base;
+ phys_addr_t addr;
+ struct irq_domain *domain;
+};
+
+static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
+
+static inline u32 lpc32xx_ic_read(struct lpc32xx_irq_chip *ic, u32 reg)
+{
+ return readl_relaxed(ic->base + reg);
+}
+
+static inline void lpc32xx_ic_write(struct lpc32xx_irq_chip *ic,
+ u32 reg, u32 val)
+{
+ writel_relaxed(val, ic->base + reg);
+}
+
+static void lpc32xx_irq_mask(struct irq_data *d)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 val, mask = BIT(d->hwirq);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) & ~mask;
+ lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
+}
+
+static void lpc32xx_irq_unmask(struct irq_data *d)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 val, mask = BIT(d->hwirq);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) | mask;
+ lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
+}
+
+static void lpc32xx_irq_ack(struct irq_data *d)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq);
+
+ lpc32xx_ic_write(ic, LPC32XX_INTC_RAW, mask);
+}
+
+static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 val, mask = BIT(d->hwirq);
+ bool high, edge;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ edge = true;
+ high = true;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = true;
+ high = false;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge = false;
+ high = true;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge = false;
+ high = false;
+ break;
+ default:
+ pr_info("unsupported irq type %d\n", type);
+ return -EINVAL;
+ }
+
+ irqd_set_trigger_type(d, type);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_POL);
+ if (high)
+ val |= mask;
+ else
+ val &= ~mask;
+ lpc32xx_ic_write(ic, LPC32XX_INTC_POL, val);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_TYPE);
+ if (edge) {
+ val |= mask;
+ irq_set_handler_locked(d, handle_edge_irq);
+ } else {
+ val &= ~mask;
+ irq_set_handler_locked(d, handle_level_irq);
+ }
+ lpc32xx_ic_write(ic, LPC32XX_INTC_TYPE, val);
+
+ return 0;
+}
+
+static void lpc32xx_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ if (ic == lpc32xx_mic_irqc)
+ seq_printf(p, "%08x.mic", ic->addr);
+ else
+ seq_printf(p, "%08x.sic", ic->addr);
+}
+
+static const struct irq_chip lpc32xx_chip = {
+ .irq_ack = lpc32xx_irq_ack,
+ .irq_mask = lpc32xx_irq_mask,
+ .irq_unmask = lpc32xx_irq_unmask,
+ .irq_set_type = lpc32xx_irq_set_type,
+ .irq_print_chip = lpc32xx_irq_print_chip,
+};
+
+static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
+{
+ struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
+ u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
+
+ while (hwirq) {
+ irq = __ffs(hwirq);
+ hwirq &= ~BIT(irq);
+ generic_handle_domain_irq(lpc32xx_mic_irqc->domain, irq);
+ }
+}
+
+static void lpc32xx_sic_handler(struct irq_desc *desc)
+{
+ struct lpc32xx_irq_chip *ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
+
+ chained_irq_enter(chip, desc);
+
+ while (hwirq) {
+ irq = __ffs(hwirq);
+ hwirq &= ~BIT(irq);
+ generic_handle_domain_irq(ic->domain, irq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct lpc32xx_irq_chip *ic = id->host_data;
+
+ irq_set_chip_data(virq, ic);
+ irq_set_chip_and_handler(virq, &lpc32xx_chip, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_noprobe(virq);
+
+ return 0;
+}
+
+static void lpc32xx_irq_domain_unmap(struct irq_domain *id, unsigned int virq)
+{
+ irq_set_chip_and_handler(virq, NULL, NULL);
+}
+
+static const struct irq_domain_ops lpc32xx_irq_domain_ops = {
+ .map = lpc32xx_irq_domain_map,
+ .unmap = lpc32xx_irq_domain_unmap,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int __init lpc32xx_of_ic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct lpc32xx_irq_chip *irqc;
+ bool is_mic = of_device_is_compatible(node, "nxp,lpc3220-mic");
+ const __be32 *reg = of_get_property(node, "reg", NULL);
+ u32 parent_irq, i, addr = reg ? be32_to_cpu(*reg) : 0;
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ irqc->addr = addr;
+ irqc->base = of_iomap(node, 0);
+ if (!irqc->base) {
+ pr_err("%pOF: unable to map registers\n", node);
+ kfree(irqc);
+ return -EINVAL;
+ }
+
+ irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
+ &lpc32xx_irq_domain_ops, irqc);
+ if (!irqc->domain) {
+ pr_err("unable to add irq domain\n");
+ iounmap(irqc->base);
+ kfree(irqc);
+ return -ENODEV;
+ }
+
+ if (is_mic) {
+ lpc32xx_mic_irqc = irqc;
+ set_handle_irq(lpc32xx_handle_irq);
+ } else {
+ for (i = 0; i < of_irq_count(node); i++) {
+ parent_irq = irq_of_parse_and_map(node, i);
+ if (parent_irq)
+ irq_set_chained_handler_and_data(parent_irq,
+ lpc32xx_sic_handler, irqc);
+ }
+ }
+
+ lpc32xx_ic_write(irqc, LPC32XX_INTC_MASK, 0x00);
+ lpc32xx_ic_write(irqc, LPC32XX_INTC_POL, 0x00);
+ lpc32xx_ic_write(irqc, LPC32XX_INTC_TYPE, 0x00);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(nxp_lpc32xx_mic, "nxp,lpc3220-mic", lpc32xx_of_ic_init);
+IRQCHIP_DECLARE(nxp_lpc32xx_sic, "nxp,lpc3220-sic", lpc32xx_of_ic_init);
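
Both lpc32xx_handle_irq() and lpc32xx_sic_handler() above drain the STAT register one bit at a time with __ffs(). A small user-space sketch of the same loop, using POSIX ffs() in place of the kernel's __ffs() (the pending value is invented):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int pending = 0x00000212;	/* invented STAT snapshot: bits 1, 4, 9 */

	while (pending) {
		int irq = ffs(pending) - 1;	/* lowest pending bit, like __ffs() */

		pending &= ~(1u << irq);
		/* here the driver would call generic_handle_domain_irq() */
		printf("dispatch hwirq %d\n", irq);
	}
	return 0;
}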
diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
new file mode 100644
index 000000000..139f26b0a
--- /dev/null
+++ b/drivers/irqchip/irq-ls-extirq.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "irq-ls-extirq: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define MAXIRQ 12
+#define LS1021A_SCFGREVCR 0x200
+
+struct ls_extirq_data {
+ void __iomem *intpcr;
+ raw_spinlock_t lock;
+ bool big_endian;
+ bool is_ls1021a_or_ls1043a;
+ u32 nirq;
+ struct irq_fwspec map[MAXIRQ];
+};
+
+static void ls_extirq_intpcr_rmw(struct ls_extirq_data *priv, u32 mask,
+ u32 value)
+{
+ u32 intpcr;
+
+ /*
+ * Serialize concurrent calls to ls_extirq_set_type() from multiple
+ * IRQ descriptors, making sure the read-modify-write is atomic.
+ */
+ raw_spin_lock(&priv->lock);
+
+ if (priv->big_endian)
+ intpcr = ioread32be(priv->intpcr);
+ else
+ intpcr = ioread32(priv->intpcr);
+
+ intpcr &= ~mask;
+ intpcr |= value;
+
+ if (priv->big_endian)
+ iowrite32be(intpcr, priv->intpcr);
+ else
+ iowrite32(intpcr, priv->intpcr);
+
+ raw_spin_unlock(&priv->lock);
+}
+
+static int
+ls_extirq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct ls_extirq_data *priv = data->chip_data;
+ irq_hw_number_t hwirq = data->hwirq;
+ u32 value, mask;
+
+ if (priv->is_ls1021a_or_ls1043a)
+ mask = 1U << (31 - hwirq);
+ else
+ mask = 1U << hwirq;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ value = mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ value = mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_EDGE_RISING:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ls_extirq_intpcr_rmw(priv, mask, value);
+
+ return irq_chip_set_type_parent(data, type);
+}
+
+static struct irq_chip ls_extirq_chip = {
+ .name = "ls-extirq",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = ls_extirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int
+ls_extirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct ls_extirq_data *priv = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ hwirq = fwspec->param[0];
+ if (hwirq >= priv->nirq)
+ return -EINVAL;
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &ls_extirq_chip,
+ priv);
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &priv->map[hwirq]);
+}
+
+static const struct irq_domain_ops extirq_domain_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .alloc = ls_extirq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int
+ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node)
+{
+ const __be32 *map;
+ u32 mapsize;
+ int ret;
+
+ map = of_get_property(node, "interrupt-map", &mapsize);
+ if (!map)
+ return -ENOENT;
+ if (mapsize % sizeof(*map))
+ return -EINVAL;
+ mapsize /= sizeof(*map);
+
+ while (mapsize) {
+ struct device_node *ipar;
+ u32 hwirq, intsize, j;
+
+ if (mapsize < 3)
+ return -EINVAL;
+ hwirq = be32_to_cpup(map);
+ if (hwirq >= MAXIRQ)
+ return -EINVAL;
+ priv->nirq = max(priv->nirq, hwirq + 1);
+
+ ipar = of_find_node_by_phandle(be32_to_cpup(map + 2));
+ map += 3;
+ mapsize -= 3;
+ if (!ipar)
+ return -EINVAL;
+ priv->map[hwirq].fwnode = &ipar->fwnode;
+ ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize);
+ if (ret)
+ return ret;
+
+ if (intsize > mapsize)
+ return -EINVAL;
+
+ priv->map[hwirq].param_count = intsize;
+ for (j = 0; j < intsize; ++j)
+ priv->map[hwirq].param[j] = be32_to_cpup(map++);
+ mapsize -= intsize;
+ }
+ return 0;
+}
+
+static int __init
+ls_extirq_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct ls_extirq_data *priv;
+ int ret;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Cannot find parent domain\n");
+ ret = -ENODEV;
+ goto err_irq_find_host;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_alloc_priv;
+ }
+
+ /*
+ * All extirq OF nodes are under a scfg/syscon node with
+ * the 'ranges' property
+ */
+ priv->intpcr = of_iomap(node, 0);
+ if (!priv->intpcr) {
+ pr_err("Cannot ioremap OF node %pOF\n", node);
+ ret = -ENOMEM;
+ goto err_iomap;
+ }
+
+ ret = ls_extirq_parse_map(priv, node);
+ if (ret)
+ goto err_parse_map;
+
+ priv->big_endian = of_device_is_big_endian(node->parent);
+ priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") ||
+ of_device_is_compatible(node, "fsl,ls1043a-extirq");
+ raw_spin_lock_init(&priv->lock);
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node,
+ &extirq_domain_ops, priv);
+ if (!domain) {
+ ret = -ENOMEM;
+ goto err_add_hierarchy;
+ }
+
+ return 0;
+
+err_add_hierarchy:
+err_parse_map:
+ iounmap(priv->intpcr);
+err_iomap:
+ kfree(priv);
+err_alloc_priv:
+err_irq_find_host:
+ return ret;
+}
+
+IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init);
+IRQCHIP_DECLARE(ls1043a_extirq, "fsl,ls1043a-extirq", ls_extirq_of_init);
+IRQCHIP_DECLARE(ls1088a_extirq, "fsl,ls1088a-extirq", ls_extirq_of_init);
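
One detail worth spelling out from ls_extirq_set_type() above: on LS1021A and LS1043A the INTPCR polarity bits are laid out in reverse, so external IRQ n is controlled by bit (31 - n), while the other SoCs use bit n directly. A tiny sketch of the two mask computations (the hwirq value is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int hwirq = 3;	/* arbitrary external IRQ line */
	unsigned int mask_reversed = 1u << (31 - hwirq);	/* LS1021A/LS1043A layout */
	unsigned int mask_straight = 1u << hwirq;		/* other SoCs */

	printf("hwirq %u: reversed mask 0x%08x, straight mask 0x%08x\n",
	       hwirq, mask_reversed, mask_straight);
	return 0;
}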
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
new file mode 100644
index 000000000..527c90e09
--- /dev/null
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Freescale SCFG MSI(-X) support
+ *
+ * Copyright (C) 2016 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#define MSI_IRQS_PER_MSIR 32
+#define MSI_MSIR_OFFSET 4
+
+#define MSI_LS1043V1_1_IRQS_PER_MSIR 8
+#define MSI_LS1043V1_1_MSIR_OFFSET 0x10
+
+struct ls_scfg_msi_cfg {
+ u32 ibs_shift; /* Shift of interrupt bit select */
+ u32 msir_irqs; /* The number of IRQs per MSIR */
+ u32 msir_base; /* The base address of MSIR */
+};
+
+struct ls_scfg_msir {
+ struct ls_scfg_msi *msi_data;
+ unsigned int index;
+ unsigned int gic_irq;
+ unsigned int bit_start;
+ unsigned int bit_end;
+ unsigned int srs; /* Shared interrupt register select */
+ void __iomem *reg;
+};
+
+struct ls_scfg_msi {
+ spinlock_t lock;
+ struct platform_device *pdev;
+ struct irq_domain *parent;
+ struct irq_domain *msi_domain;
+ void __iomem *regs;
+ phys_addr_t msiir_addr;
+ struct ls_scfg_msi_cfg *cfg;
+ u32 msir_num;
+ struct ls_scfg_msir *msir;
+ u32 irqs_num;
+ unsigned long *used;
+};
+
+static struct irq_chip ls_scfg_msi_irq_chip = {
+ .name = "MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info ls_scfg_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &ls_scfg_msi_irq_chip,
+};
+
+static int msi_affinity_flag = 1;
+
+static int __init early_parse_ls_scfg_msi(char *p)
+{
+ if (p && strncmp(p, "no-affinity", 11) == 0)
+ msi_affinity_flag = 0;
+ else
+ msi_affinity_flag = 1;
+
+ return 0;
+}
+early_param("lsmsi", early_parse_ls_scfg_msi);
+
+static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ msg->address_hi = upper_32_bits(msi_data->msiir_addr);
+ msg->address_lo = lower_32_bits(msi_data->msiir_addr);
+ msg->data = data->hwirq;
+
+ if (msi_affinity_flag) {
+ const struct cpumask *mask;
+
+ mask = irq_data_get_effective_affinity_mask(data);
+ msg->data |= cpumask_first(mask);
+ }
+
+ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+}
+
+static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
+ u32 cpu;
+
+ if (!msi_affinity_flag)
+ return -EINVAL;
+
+ if (!force)
+ cpu = cpumask_any_and(mask, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask);
+
+ if (cpu >= msi_data->msir_num)
+ return -EINVAL;
+
+ if (msi_data->msir[cpu].gic_irq <= 0) {
+ pr_warn("cannot bind the irq to cpu%d\n", cpu);
+ return -EINVAL;
+ }
+
+ irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip ls_scfg_msi_parent_chip = {
+ .name = "SCFG",
+ .irq_compose_msi_msg = ls_scfg_msi_compose_msg,
+ .irq_set_affinity = ls_scfg_msi_set_affinity,
+};
+
+static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ msi_alloc_info_t *info = args;
+ struct ls_scfg_msi *msi_data = domain->host_data;
+ int pos, err = 0;
+
+ WARN_ON(nr_irqs != 1);
+
+ spin_lock(&msi_data->lock);
+ pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
+ if (pos < msi_data->irqs_num)
+ __set_bit(pos, msi_data->used);
+ else
+ err = -ENOSPC;
+ spin_unlock(&msi_data->lock);
+
+ if (err)
+ return err;
+
+ err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, pos,
+ &ls_scfg_msi_parent_chip, msi_data,
+ handle_simple_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
+ int pos;
+
+ pos = d->hwirq;
+ if (pos < 0 || pos >= msi_data->irqs_num) {
+ pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
+ return;
+ }
+
+ spin_lock(&msi_data->lock);
+ __clear_bit(pos, msi_data->used);
+ spin_unlock(&msi_data->lock);
+}
+
+static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
+ .alloc = ls_scfg_msi_domain_irq_alloc,
+ .free = ls_scfg_msi_domain_irq_free,
+};
+
+static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
+{
+ struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
+ struct ls_scfg_msi *msi_data = msir->msi_data;
+ unsigned long val;
+ int pos, size, hwirq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ val = ioread32be(msir->reg);
+
+ pos = msir->bit_start;
+ size = msir->bit_end + 1;
+
+ for_each_set_bit_from(pos, &val, size) {
+ hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
+ msir->srs;
+ generic_handle_domain_irq(msi_data->parent, hwirq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
+{
+ /* Initialize MSI domain parent */
+ msi_data->parent = irq_domain_add_linear(NULL,
+ msi_data->irqs_num,
+ &ls_scfg_msi_domain_ops,
+ msi_data);
+ if (!msi_data->parent) {
+ dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi_data->msi_domain = pci_msi_create_irq_domain(
+ of_node_to_fwnode(msi_data->pdev->dev.of_node),
+ &ls_scfg_msi_domain_info,
+ msi_data->parent);
+ if (!msi_data->msi_domain) {
+ dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi_data->parent);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
+{
+ struct ls_scfg_msir *msir;
+ int virq, i, hwirq;
+
+ virq = platform_get_irq(msi_data->pdev, index);
+ if (virq <= 0)
+ return -ENODEV;
+
+ msir = &msi_data->msir[index];
+ msir->index = index;
+ msir->msi_data = msi_data;
+ msir->gic_irq = virq;
+ msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;
+
+ if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
+ msir->bit_start = 32 - ((msir->index + 1) *
+ MSI_LS1043V1_1_IRQS_PER_MSIR);
+ msir->bit_end = msir->bit_start +
+ MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
+ } else {
+ msir->bit_start = 0;
+ msir->bit_end = msi_data->cfg->msir_irqs - 1;
+ }
+
+ irq_set_chained_handler_and_data(msir->gic_irq,
+ ls_scfg_msi_irq_handler,
+ msir);
+
+ if (msi_affinity_flag) {
+ /* Associate MSIR interrupt to the cpu */
+ irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
+ msir->srs = 0; /* This value is determined by the CPU */
+ } else
+ msir->srs = index;
+
+ /* Release the hwirqs corresponding to this MSIR */
+ if (!msi_affinity_flag || msir->index == 0) {
+ for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
+ hwirq = i << msi_data->cfg->ibs_shift | msir->index;
+ bitmap_clear(msi_data->used, hwirq, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
+{
+ struct ls_scfg_msi *msi_data = msir->msi_data;
+ int i, hwirq;
+
+ if (msir->gic_irq > 0)
+ irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);
+
+ for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
+ hwirq = i << msi_data->cfg->ibs_shift | msir->index;
+ bitmap_set(msi_data->used, hwirq, 1);
+ }
+
+ return 0;
+}
+
+static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
+ .ibs_shift = 3,
+ .msir_irqs = MSI_IRQS_PER_MSIR,
+ .msir_base = MSI_MSIR_OFFSET,
+};
+
+static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
+ .ibs_shift = 2,
+ .msir_irqs = MSI_IRQS_PER_MSIR,
+ .msir_base = MSI_MSIR_OFFSET,
+};
+
+static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
+ .ibs_shift = 2,
+ .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
+ .msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
+};
+
+static const struct of_device_id ls_scfg_msi_id[] = {
+ /* The following two misspelled compatibles are obsolete */
+ { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
+ { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
+
+ { .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
+ { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
+ { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
+ { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
+ { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
+
+static int ls_scfg_msi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct ls_scfg_msi *msi_data;
+ struct resource *res;
+ int i, ret;
+
+ match = of_match_device(ls_scfg_msi_id, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
+ msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msi_data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize 'regs'\n");
+ return PTR_ERR(msi_data->regs);
+ }
+ msi_data->msiir_addr = res->start;
+
+ msi_data->pdev = pdev;
+ spin_lock_init(&msi_data->lock);
+
+ msi_data->irqs_num = MSI_IRQS_PER_MSIR *
+ (1 << msi_data->cfg->ibs_shift);
+ msi_data->used = devm_bitmap_zalloc(&pdev->dev, msi_data->irqs_num, GFP_KERNEL);
+ if (!msi_data->used)
+ return -ENOMEM;
+ /*
+ * Reserve all the hwirqs
+ * The available hwirqs will be released in ls_scfg_msi_setup_hwirq()
+ */
+ bitmap_set(msi_data->used, 0, msi_data->irqs_num);
+
+ msi_data->msir_num = of_irq_count(pdev->dev.of_node);
+
+ if (msi_affinity_flag) {
+ u32 cpu_num;
+
+ cpu_num = num_possible_cpus();
+ if (msi_data->msir_num >= cpu_num)
+ msi_data->msir_num = cpu_num;
+ else
+ msi_affinity_flag = 0;
+ }
+
+ msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
+ sizeof(*msi_data->msir),
+ GFP_KERNEL);
+ if (!msi_data->msir)
+ return -ENOMEM;
+
+ for (i = 0; i < msi_data->msir_num; i++)
+ ls_scfg_msi_setup_hwirq(msi_data, i);
+
+ ret = ls_scfg_msi_domains_init(msi_data);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, msi_data);
+
+ return 0;
+}
+
+static int ls_scfg_msi_remove(struct platform_device *pdev)
+{
+ struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < msi_data->msir_num; i++)
+ ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);
+
+ irq_domain_remove(msi_data->msi_domain);
+ irq_domain_remove(msi_data->parent);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ls_scfg_msi_driver = {
+ .driver = {
+ .name = "ls-scfg-msi",
+ .of_match_table = ls_scfg_msi_id,
+ },
+ .probe = ls_scfg_msi_probe,
+ .remove = ls_scfg_msi_remove,
+};
+
+module_platform_driver(ls_scfg_msi_driver);
+
+MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
+MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
+MODULE_LICENSE("GPL v2");
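
To make the hwirq numbering above concrete: the bit position within an MSIR is packed into the upper bits and the MSIR select (SRS) into the low ibs_shift bits, which is what lets ls_scfg_msi_compose_msg() OR the target CPU into msg->data when affinity is enabled. A small standalone sketch of the encode/decode with ibs_shift = 2 (the LS1046A value); the bit and SRS numbers are invented:

#include <stdio.h>

int main(void)
{
	unsigned int ibs_shift = 2;	/* LS1046A configuration */
	unsigned int bit = 5;		/* invented bit within the MSIR */
	unsigned int srs = 1;		/* invented MSIR (CPU) select */
	unsigned int hwirq = (bit << ibs_shift) | srs;

	/* Decoding mirrors what the MSIR handler reconstructs */
	printf("hwirq=%u -> bit=%u, srs=%u\n",
	       hwirq, hwirq >> ibs_shift, hwirq & ((1u << ibs_shift) - 1));
	return 0;
}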
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
new file mode 100644
index 000000000..77a3f7dfa
--- /dev/null
+++ b/drivers/irqchip/irq-ls1x.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson-1 platform IRQ support
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define LS_REG_INTC_STATUS 0x00
+#define LS_REG_INTC_EN 0x04
+#define LS_REG_INTC_SET 0x08
+#define LS_REG_INTC_CLR 0x0c
+#define LS_REG_INTC_POL 0x10
+#define LS_REG_INTC_EDGE 0x14
+
+/**
+ * struct ls1x_intc_priv - private ls1x-intc data.
+ * @domain: IRQ domain.
+ * @intc_base: IO Base of intc registers.
+ */
+
+struct ls1x_intc_priv {
+ struct irq_domain *domain;
+ void __iomem *intc_base;
+};
+
+
+static void ls1x_chained_handle_irq(struct irq_desc *desc)
+{
+ struct ls1x_intc_priv *priv = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+ pending = readl(priv->intc_base + LS_REG_INTC_STATUS) &
+ readl(priv->intc_base + LS_REG_INTC_EN);
+
+ if (!pending)
+ spurious_interrupt();
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_domain_irq(priv->domain, bit);
+ pending &= ~BIT(bit);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void ls_intc_set_bit(struct irq_chip_generic *gc,
+ unsigned int offset,
+ u32 mask, bool set)
+{
+ if (set)
+ writel(readl(gc->reg_base + offset) | mask,
+ gc->reg_base + offset);
+ else
+ writel(readl(gc->reg_base + offset) & ~mask,
+ gc->reg_base + offset);
+}
+
+static int ls_intc_set_type(struct irq_data *data, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ u32 mask = data->mask;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irqd_set_trigger_type(data, type);
+ return irq_setup_alt_chip(data, type);
+}
+
+
+static int __init ls1x_intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ struct ls1x_intc_priv *priv;
+ int parent_irq, err = 0;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->intc_base = of_iomap(node, 0);
+ if (!priv->intc_base) {
+ err = -ENODEV;
+ goto out_free_priv;
+ }
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ pr_err("ls1x-irq: unable to get parent irq\n");
+ err = -ENODEV;
+ goto out_iounmap;
+ }
+
+ /* Set up an IRQ domain */
+ priv->domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
+ NULL);
+ if (!priv->domain) {
+ pr_err("ls1x-irq: cannot add IRQ domain\n");
+ err = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ err = irq_alloc_domain_generic_chips(priv->domain, 32, 2,
+ node->full_name, handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (err) {
+ pr_err("ls1x-irq: unable to register IRQ domain\n");
+ goto out_free_domain;
+ }
+
+ /* Mask all irqs */
+ writel(0x0, priv->intc_base + LS_REG_INTC_EN);
+
+ /* Ack all irqs */
+ writel(0xffffffff, priv->intc_base + LS_REG_INTC_CLR);
+
+ /* Set all irqs to high level triggered */
+ writel(0xffffffff, priv->intc_base + LS_REG_INTC_POL);
+
+ gc = irq_get_domain_generic_chip(priv->domain, 0);
+
+ gc->reg_base = priv->intc_base;
+
+ ct = gc->chip_types;
+ ct[0].type = IRQ_TYPE_LEVEL_MASK;
+ ct[0].regs.mask = LS_REG_INTC_EN;
+ ct[0].regs.ack = LS_REG_INTC_CLR;
+ ct[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ ct[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ ct[0].chip.irq_ack = irq_gc_ack_set_bit;
+ ct[0].chip.irq_set_type = ls_intc_set_type;
+ ct[0].handler = handle_level_irq;
+
+ ct[1].type = IRQ_TYPE_EDGE_BOTH;
+ ct[1].regs.mask = LS_REG_INTC_EN;
+ ct[1].regs.ack = LS_REG_INTC_CLR;
+ ct[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ ct[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ ct[1].chip.irq_ack = irq_gc_ack_set_bit;
+ ct[1].chip.irq_set_type = ls_intc_set_type;
+ ct[1].handler = handle_edge_irq;
+
+ irq_set_chained_handler_and_data(parent_irq,
+ ls1x_chained_handle_irq, priv);
+
+ return 0;
+
+out_free_domain:
+ irq_domain_remove(priv->domain);
+out_iounmap:
+ iounmap(priv->intc_base);
+out_free_priv:
+ kfree(priv);
+
+ return err;
+}
+
+IRQCHIP_DECLARE(ls1x_intc, "loongson,ls1x-intc", ls1x_intc_of_init);
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c
new file mode 100644
index 000000000..8b81271c8
--- /dev/null
+++ b/drivers/irqchip/irq-madera.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interrupt support for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/irq-madera.h>
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/pdata.h>
+#include <linux/mfd/madera/registers.h>
+
+#define MADERA_IRQ(_irq, _reg) \
+ [MADERA_IRQ_ ## _irq] = { \
+ .reg_offset = (_reg) - MADERA_IRQ1_STATUS_2, \
+ .mask = MADERA_ ## _irq ## _EINT1 \
+ }
+
+/* Mappings are the same for all Madera codecs */
+static const struct regmap_irq madera_irqs[MADERA_NUM_IRQ] = {
+ MADERA_IRQ(FLL1_LOCK, MADERA_IRQ1_STATUS_2),
+ MADERA_IRQ(FLL2_LOCK, MADERA_IRQ1_STATUS_2),
+ MADERA_IRQ(FLL3_LOCK, MADERA_IRQ1_STATUS_2),
+ MADERA_IRQ(FLLAO_LOCK, MADERA_IRQ1_STATUS_2),
+
+ MADERA_IRQ(MICDET1, MADERA_IRQ1_STATUS_6),
+ MADERA_IRQ(MICDET2, MADERA_IRQ1_STATUS_6),
+ MADERA_IRQ(HPDET, MADERA_IRQ1_STATUS_6),
+
+ MADERA_IRQ(MICD_CLAMP_RISE, MADERA_IRQ1_STATUS_7),
+ MADERA_IRQ(MICD_CLAMP_FALL, MADERA_IRQ1_STATUS_7),
+ MADERA_IRQ(JD1_RISE, MADERA_IRQ1_STATUS_7),
+ MADERA_IRQ(JD1_FALL, MADERA_IRQ1_STATUS_7),
+
+ MADERA_IRQ(ASRC2_IN1_LOCK, MADERA_IRQ1_STATUS_9),
+ MADERA_IRQ(ASRC2_IN2_LOCK, MADERA_IRQ1_STATUS_9),
+ MADERA_IRQ(ASRC1_IN1_LOCK, MADERA_IRQ1_STATUS_9),
+ MADERA_IRQ(ASRC1_IN2_LOCK, MADERA_IRQ1_STATUS_9),
+ MADERA_IRQ(DRC2_SIG_DET, MADERA_IRQ1_STATUS_9),
+ MADERA_IRQ(DRC1_SIG_DET, MADERA_IRQ1_STATUS_9),
+
+ MADERA_IRQ(DSP_IRQ1, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ2, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ3, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ4, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ5, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ6, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ7, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ8, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ9, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ10, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ11, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ12, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ13, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ14, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ15, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ16, MADERA_IRQ1_STATUS_11),
+
+ MADERA_IRQ(HP3R_SC, MADERA_IRQ1_STATUS_12),
+ MADERA_IRQ(HP3L_SC, MADERA_IRQ1_STATUS_12),
+ MADERA_IRQ(HP2R_SC, MADERA_IRQ1_STATUS_12),
+ MADERA_IRQ(HP2L_SC, MADERA_IRQ1_STATUS_12),
+ MADERA_IRQ(HP1R_SC, MADERA_IRQ1_STATUS_12),
+ MADERA_IRQ(HP1L_SC, MADERA_IRQ1_STATUS_12),
+
+ MADERA_IRQ(SPK_OVERHEAT_WARN, MADERA_IRQ1_STATUS_15),
+ MADERA_IRQ(SPK_OVERHEAT, MADERA_IRQ1_STATUS_15),
+
+ MADERA_IRQ(DSP1_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP2_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP3_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP4_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP5_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP6_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP7_BUS_ERR, MADERA_IRQ1_STATUS_33),
+};
+
+static const struct regmap_irq_chip madera_irq_chip = {
+ .name = "madera IRQ",
+ .status_base = MADERA_IRQ1_STATUS_2,
+ .mask_base = MADERA_IRQ1_MASK_2,
+ .ack_base = MADERA_IRQ1_STATUS_2,
+ .runtime_pm = true,
+ .num_regs = 32,
+ .irqs = madera_irqs,
+ .num_irqs = ARRAY_SIZE(madera_irqs),
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int madera_suspend(struct device *dev)
+{
+ struct madera *madera = dev_get_drvdata(dev->parent);
+
+ dev_dbg(madera->irq_dev, "Suspend, disabling IRQ\n");
+
+ /*
+ * A runtime resume would be needed to access the chip interrupt
+ * controller but runtime pm doesn't function during suspend.
+ * Temporarily disable interrupts until we reach suspend_noirq state.
+ */
+ disable_irq(madera->irq);
+
+ return 0;
+}
+
+static int madera_suspend_noirq(struct device *dev)
+{
+ struct madera *madera = dev_get_drvdata(dev->parent);
+
+ dev_dbg(madera->irq_dev, "No IRQ suspend, reenabling IRQ\n");
+
+ /* Re-enable interrupts to service wakeup interrupts from the chip */
+ enable_irq(madera->irq);
+
+ return 0;
+}
+
+static int madera_resume_noirq(struct device *dev)
+{
+ struct madera *madera = dev_get_drvdata(dev->parent);
+
+ dev_dbg(madera->irq_dev, "No IRQ resume, disabling IRQ\n");
+
+ /*
+ * We can't handle interrupts until runtime pm is available again.
+ * Disable them temporarily.
+ */
+ disable_irq(madera->irq);
+
+ return 0;
+}
+
+static int madera_resume(struct device *dev)
+{
+ struct madera *madera = dev_get_drvdata(dev->parent);
+
+ dev_dbg(madera->irq_dev, "Resume, reenabling IRQ\n");
+
+ /* Interrupts can now be handled */
+ enable_irq(madera->irq);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops madera_irq_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(madera_suspend, madera_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(madera_suspend_noirq,
+ madera_resume_noirq)
+};
+
+static int madera_irq_probe(struct platform_device *pdev)
+{
+ struct madera *madera = dev_get_drvdata(pdev->dev.parent);
+ struct irq_data *irq_data;
+ unsigned int irq_flags = 0;
+ int ret;
+
+ dev_dbg(&pdev->dev, "probe\n");
+
+ /*
+ * Read the flags from the interrupt controller if not specified
+ * by pdata
+ */
+ irq_flags = madera->pdata.irq_flags;
+ if (!irq_flags) {
+ irq_data = irq_get_irq_data(madera->irq);
+ if (!irq_data) {
+ dev_err(&pdev->dev, "Invalid IRQ: %d\n", madera->irq);
+ return -EINVAL;
+ }
+
+ irq_flags = irqd_get_trigger_type(irq_data);
+
+ /* Codec defaults to trigger low, use this if no flags given */
+ if (irq_flags == IRQ_TYPE_NONE)
+ irq_flags = IRQF_TRIGGER_LOW;
+ }
+
+ if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ dev_err(&pdev->dev, "Host interrupt not level-triggered\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The silicon always starts at active-low, check if we need to
+ * switch to active-high.
+ */
+ if (irq_flags & IRQF_TRIGGER_HIGH) {
+ ret = regmap_update_bits(madera->regmap, MADERA_IRQ1_CTRL,
+ MADERA_IRQ_POL_MASK, 0);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set IRQ polarity: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /*
+ * NOTE: regmap registers this against the OF node of the parent of
+ * the regmap - that is, against the mfd driver
+ */
+ ret = regmap_add_irq_chip(madera->regmap, madera->irq, IRQF_ONESHOT, 0,
+ &madera_irq_chip, &madera->irq_data);
+ if (ret) {
+ dev_err(&pdev->dev, "add_irq_chip failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Save dev in parent MFD struct so it is accessible to siblings */
+ madera->irq_dev = &pdev->dev;
+
+ return 0;
+}
+
+static int madera_irq_remove(struct platform_device *pdev)
+{
+ struct madera *madera = dev_get_drvdata(pdev->dev.parent);
+
+ /*
+ * The IRQ is disabled by the parent MFD driver before
+ * it starts cleaning up all child drivers
+ */
+ madera->irq_dev = NULL;
+ regmap_del_irq_chip(madera->irq, madera->irq_data);
+
+ return 0;
+}
+
+static struct platform_driver madera_irq_driver = {
+ .probe = &madera_irq_probe,
+ .remove = &madera_irq_remove,
+ .driver = {
+ .name = "madera-irq",
+ .pm = &madera_irq_pm_ops,
+ }
+};
+module_platform_driver(madera_irq_driver);
+
+MODULE_SOFTDEP("pre: madera");
+MODULE_DESCRIPTION("Madera IRQ driver");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
new file mode 100644
index 000000000..f3faf5c99
--- /dev/null
+++ b/drivers/irqchip/irq-mbigen.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 HiSilicon Limited, All Rights Reserved.
+ * Author: Jun Ma <majun258@huawei.com>
+ * Author: Yun Wu <wuyun.wu@huawei.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* The number of interrupts supported per mbigen node */
+#define IRQS_PER_MBIGEN_NODE 128
+
+/* 64 irqs (Pin0-pin63) are reserved for each mbigen chip */
+#define RESERVED_IRQ_PER_MBIGEN_CHIP 64
+
+/* The maximum IRQ pin number of an mbigen chip (starting from 0) */
+#define MAXIMUM_IRQ_PIN_NUM 1407
+
+/*
+ * In the mbigen vector register:
+ * bit[21:12]: event id value
+ * bit[11:0]: device id
+ */
+#define IRQ_EVENT_ID_SHIFT 12
+#define IRQ_EVENT_ID_MASK 0x3ff
+
+/* register range of each mbigen node */
+#define MBIGEN_NODE_OFFSET 0x1000
+
+/* offset of vector register in mbigen node */
+#define REG_MBIGEN_VEC_OFFSET 0x200
+
+/*
+ * offset of the clear register in an mbigen node
+ * This register is used to clear the status
+ * of an interrupt
+ */
+#define REG_MBIGEN_CLEAR_OFFSET 0xa000
+
+/*
+ * offset of the interrupt type register
+ * This register is used to configure the interrupt
+ * trigger type
+ */
+#define REG_MBIGEN_TYPE_OFFSET 0x0
+
+/**
+ * struct mbigen_device - holds the information of mbigen device.
+ *
+ * @pdev: pointer to the platform device structure of mbigen chip.
+ * @base: mapped address of this mbigen chip.
+ */
+struct mbigen_device {
+ struct platform_device *pdev;
+ void __iomem *base;
+};
+
+static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
+{
+ unsigned int nid, pin;
+
+ hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP;
+ nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
+ pin = hwirq % IRQS_PER_MBIGEN_NODE;
+
+ return pin * 4 + nid * MBIGEN_NODE_OFFSET
+ + REG_MBIGEN_VEC_OFFSET;
+}
+
+static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
+ u32 *mask, u32 *addr)
+{
+ unsigned int nid, irq_ofst, ofst;
+
+ hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP;
+ nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
+ irq_ofst = hwirq % IRQS_PER_MBIGEN_NODE;
+
+ *mask = 1 << (irq_ofst % 32);
+ ofst = irq_ofst / 32 * 4;
+
+ *addr = ofst + nid * MBIGEN_NODE_OFFSET
+ + REG_MBIGEN_TYPE_OFFSET;
+}
+
+static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
+ u32 *mask, u32 *addr)
+{
+ unsigned int ofst = (hwirq / 32) * 4;
+
+ *mask = 1 << (hwirq % 32);
+ *addr = ofst + REG_MBIGEN_CLEAR_OFFSET;
+}
+
+static void mbigen_eoi_irq(struct irq_data *data)
+{
+ void __iomem *base = data->chip_data;
+ u32 mask, addr;
+
+ get_mbigen_clear_reg(data->hwirq, &mask, &addr);
+
+ writel_relaxed(mask, base + addr);
+
+ irq_chip_eoi_parent(data);
+}
+
+static int mbigen_set_type(struct irq_data *data, unsigned int type)
+{
+ void __iomem *base = data->chip_data;
+ u32 mask, addr, val;
+
+ if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ get_mbigen_type_reg(data->hwirq, &mask, &addr);
+
+ val = readl_relaxed(base + addr);
+
+ if (type == IRQ_TYPE_LEVEL_HIGH)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ writel_relaxed(val, base + addr);
+
+ return 0;
+}
+
+static struct irq_chip mbigen_irq_chip = {
+ .name = "mbigen-v2",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = mbigen_eoi_irq,
+ .irq_set_type = mbigen_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct irq_data *d = irq_get_irq_data(desc->irq);
+ void __iomem *base = d->chip_data;
+ u32 val;
+
+ if (!msg->address_lo && !msg->address_hi)
+ return;
+
+ base += get_mbigen_vec_reg(d->hwirq);
+ val = readl_relaxed(base);
+
+ val &= ~(IRQ_EVENT_ID_MASK << IRQ_EVENT_ID_SHIFT);
+ val |= (msg->data << IRQ_EVENT_ID_SHIFT);
+
+ /* The doorbell address is encoded in the mbigen register by default,
+ * so we don't need to program the doorbell address here
+ */
+ writel_relaxed(val, base);
+}
+
+static int mbigen_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode) || is_acpi_device_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ if ((fwspec->param[0] > MAXIMUM_IRQ_PIN_NUM) ||
+ (fwspec->param[0] < RESERVED_IRQ_PER_MBIGEN_CHIP))
+ return -EINVAL;
+ else
+ *hwirq = fwspec->param[0];
+
+ /* If there is no valid irq type, just use the default type */
+ if ((fwspec->param[1] == IRQ_TYPE_EDGE_RISING) ||
+ (fwspec->param[1] == IRQ_TYPE_LEVEL_HIGH))
+ *type = fwspec->param[1];
+ else
+ return -EINVAL;
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int mbigen_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct irq_fwspec *fwspec = args;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ struct mbigen_device *mgn_chip;
+ int i, err;
+
+ err = mbigen_domain_translate(domain, fwspec, &hwirq, &type);
+ if (err)
+ return err;
+
+ err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
+ if (err)
+ return err;
+
+ mgn_chip = platform_msi_get_host_data(domain);
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &mbigen_irq_chip, mgn_chip->base);
+
+ return 0;
+}
+
+static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ platform_msi_device_domain_free(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops mbigen_domain_ops = {
+ .translate = mbigen_domain_translate,
+ .alloc = mbigen_irq_domain_alloc,
+ .free = mbigen_irq_domain_free,
+};
+
+static int mbigen_of_create_domain(struct platform_device *pdev,
+ struct mbigen_device *mgn_chip)
+{
+ struct device *parent;
+ struct platform_device *child;
+ struct irq_domain *domain;
+ struct device_node *np;
+ u32 num_pins;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_property_read_bool(np, "interrupt-controller"))
+ continue;
+
+ parent = platform_bus_type.dev_root;
+ child = of_platform_device_create(np, NULL, parent);
+ if (!child) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(child->dev.of_node, "num-pins",
+ &num_pins) < 0) {
+ dev_err(&pdev->dev, "No num-pins property\n");
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ domain = platform_msi_create_device_domain(&child->dev, num_pins,
+ mbigen_write_msg,
+ &mbigen_domain_ops,
+ mgn_chip);
+ if (!domain) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id mbigen_acpi_match[] = {
+ { "HISI0152", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match);
+
+static int mbigen_acpi_create_domain(struct platform_device *pdev,
+ struct mbigen_device *mgn_chip)
+{
+ struct irq_domain *domain;
+ u32 num_pins = 0;
+ int ret;
+
+ /*
+ * "num-pins" is the total number of interrupt pins implemented in
+ * this mbigen instance. The mbigen is an interrupt controller
+ * connected to the ITS, converting wired interrupts into MSIs, so we
+ * use "num-pins" to allocate the MSI vectors needed by the client
+ * devices connected to it.
+ *
+ * Here is the DSDT device node used for mbigen in firmware:
+ * Device(MBI0) {
+ * Name(_HID, "HISI0152")
+ * Name(_UID, Zero)
+ * Name(_CRS, ResourceTemplate() {
+ * Memory32Fixed(ReadWrite, 0xa0080000, 0x10000)
+ * })
+ *
+ * Name(_DSD, Package () {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () {"num-pins", 378}
+ * }
+ * })
+ * }
+ */
+ ret = device_property_read_u32(&pdev->dev, "num-pins", &num_pins);
+ if (ret || num_pins == 0)
+ return -EINVAL;
+
+ domain = platform_msi_create_device_domain(&pdev->dev, num_pins,
+ mbigen_write_msg,
+ &mbigen_domain_ops,
+ mgn_chip);
+ if (!domain)
+ return -ENOMEM;
+
+ return 0;
+}
+#else
+static inline int mbigen_acpi_create_domain(struct platform_device *pdev,
+ struct mbigen_device *mgn_chip)
+{
+ return -ENODEV;
+}
+#endif
+
+static int mbigen_device_probe(struct platform_device *pdev)
+{
+ struct mbigen_device *mgn_chip;
+ struct resource *res;
+ int err;
+
+ mgn_chip = devm_kzalloc(&pdev->dev, sizeof(*mgn_chip), GFP_KERNEL);
+ if (!mgn_chip)
+ return -ENOMEM;
+
+ mgn_chip->pdev = pdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ mgn_chip->base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!mgn_chip->base) {
+ dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
+ return -ENOMEM;
+ }
+
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
+ err = mbigen_of_create_domain(pdev, mgn_chip);
+ else if (ACPI_COMPANION(&pdev->dev))
+ err = mbigen_acpi_create_domain(pdev, mgn_chip);
+ else
+ err = -EINVAL;
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to create mbi-gen irqdomain\n");
+ return err;
+ }
+
+ platform_set_drvdata(pdev, mgn_chip);
+ return 0;
+}
+
+static const struct of_device_id mbigen_of_match[] = {
+ { .compatible = "hisilicon,mbigen-v2" },
+ { /* END */ }
+};
+MODULE_DEVICE_TABLE(of, mbigen_of_match);
+
+static struct platform_driver mbigen_platform_driver = {
+ .driver = {
+ .name = "Hisilicon MBIGEN-V2",
+ .of_match_table = mbigen_of_match,
+ .acpi_match_table = ACPI_PTR(mbigen_acpi_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = mbigen_device_probe,
+};
+
+module_platform_driver(mbigen_platform_driver);
+
+MODULE_AUTHOR("Jun Ma <majun258@huawei.com>");
+MODULE_AUTHOR("Yun Wu <wuyun.wu@huawei.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HiSilicon MBI Generator driver");
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
new file mode 100644
index 000000000..c726a1983
--- /dev/null
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Microchip External Interrupt Controller driver
+ *
+ * Copyright (C) 2021 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define MCHP_EIC_GFCS (0x0)
+#define MCHP_EIC_SCFG(x) (0x4 + (x) * 0x4)
+#define MCHP_EIC_SCFG_EN BIT(16)
+#define MCHP_EIC_SCFG_LVL BIT(9)
+#define MCHP_EIC_SCFG_POL BIT(8)
+
+#define MCHP_EIC_NIRQ (2)
+
+/*
+ * struct mchp_eic - EIC private data structure
+ * @base: base address
+ * @clk: peripheral clock
+ * @domain: irq domain
+ * @irqs: irqs b/w eic and gic
+ * @scfg: backup for scfg registers (necessary for backup and self-refresh mode)
+ * @wakeup_source: wakeup source mask
+ */
+struct mchp_eic {
+ void __iomem *base;
+ struct clk *clk;
+ struct irq_domain *domain;
+ u32 irqs[MCHP_EIC_NIRQ];
+ u32 scfg[MCHP_EIC_NIRQ];
+ u32 wakeup_source;
+};
+
+static struct mchp_eic *eic;
+
+static void mchp_eic_irq_mask(struct irq_data *d)
+{
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp &= ~MCHP_EIC_SCFG_EN;
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ irq_chip_mask_parent(d);
+}
+
+static void mchp_eic_irq_unmask(struct irq_data *d)
+{
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp |= MCHP_EIC_SCFG_EN;
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ irq_chip_unmask_parent(d);
+}
+
+static int mchp_eic_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int parent_irq_type;
+ unsigned int tmp;
+
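+ /*
+ * The parent GIC only supports rising-edge and high-level triggers, so
+ * falling-edge and low-level requests are configured here and reported
+ * to the parent as their inverted equivalents.
+ */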
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp &= ~(MCHP_EIC_SCFG_POL | MCHP_EIC_SCFG_LVL);
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ tmp |= MCHP_EIC_SCFG_POL | MCHP_EIC_SCFG_LVL;
+ parent_irq_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ tmp |= MCHP_EIC_SCFG_LVL;
+ parent_irq_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ parent_irq_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ tmp |= MCHP_EIC_SCFG_POL;
+ parent_irq_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ return irq_chip_set_type_parent(d, parent_irq_type);
+}
+
+static int mchp_eic_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ irq_set_irq_wake(eic->irqs[d->hwirq], on);
+ if (on)
+ eic->wakeup_source |= BIT(d->hwirq);
+ else
+ eic->wakeup_source &= ~BIT(d->hwirq);
+
+ return 0;
+}
+
+static int mchp_eic_irq_suspend(void)
+{
+ unsigned int hwirq;
+
+ for (hwirq = 0; hwirq < MCHP_EIC_NIRQ; hwirq++)
+ eic->scfg[hwirq] = readl_relaxed(eic->base +
+ MCHP_EIC_SCFG(hwirq));
+
+ if (!eic->wakeup_source)
+ clk_disable_unprepare(eic->clk);
+
+ return 0;
+}
+
+static void mchp_eic_irq_resume(void)
+{
+ unsigned int hwirq;
+
+ if (!eic->wakeup_source)
+ clk_prepare_enable(eic->clk);
+
+ for (hwirq = 0; hwirq < MCHP_EIC_NIRQ; hwirq++)
+ writel_relaxed(eic->scfg[hwirq], eic->base +
+ MCHP_EIC_SCFG(hwirq));
+}
+
+static struct syscore_ops mchp_eic_syscore_ops = {
+ .suspend = mchp_eic_irq_suspend,
+ .resume = mchp_eic_irq_resume,
+};
+
+static struct irq_chip mchp_eic_chip = {
+ .name = "eic",
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED,
+ .irq_mask = mchp_eic_irq_mask,
+ .irq_unmask = mchp_eic_irq_unmask,
+ .irq_set_type = mchp_eic_irq_set_type,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_wake = mchp_eic_irq_set_wake,
+};
+
+static int mchp_eic_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
+ if (ret || hwirq >= MCHP_EIC_NIRQ)
+ return ret;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &mchp_eic_chip, eic);
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = GIC_SPI;
+ parent_fwspec.param[1] = eic->irqs[hwirq];
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static const struct irq_domain_ops mchp_eic_domain_ops = {
+ .translate = irq_domain_translate_twocell,
+ .alloc = mchp_eic_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int mchp_eic_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *parent_domain = NULL;
+ int ret, i;
+
+ eic = kzalloc(sizeof(*eic), GFP_KERNEL);
+ if (!eic)
+ return -ENOMEM;
+
+ eic->base = of_iomap(node, 0);
+ if (!eic->base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ ret = -ENODEV;
+ goto unmap;
+ }
+
+ eic->clk = of_clk_get_by_name(node, "pclk");
+ if (IS_ERR(eic->clk)) {
+ ret = PTR_ERR(eic->clk);
+ goto unmap;
+ }
+
+ ret = clk_prepare_enable(eic->clk);
+ if (ret)
+ goto unmap;
+
+ for (i = 0; i < MCHP_EIC_NIRQ; i++) {
+ struct of_phandle_args irq;
+
+ /* Disable it, if any. */
+ writel_relaxed(0UL, eic->base + MCHP_EIC_SCFG(i));
+
+ ret = of_irq_parse_one(node, i, &irq);
+ if (ret)
+ goto clk_unprepare;
+
+ if (WARN_ON(irq.args_count != 3)) {
+ ret = -EINVAL;
+ goto clk_unprepare;
+ }
+
+ eic->irqs[i] = irq.args[1];
+ }
+
+ eic->domain = irq_domain_add_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ,
+ node, &mchp_eic_domain_ops, eic);
+ if (!eic->domain) {
+ pr_err("%pOF: Failed to add domain\n", node);
+ ret = -ENODEV;
+ goto clk_unprepare;
+ }
+
+ register_syscore_ops(&mchp_eic_syscore_ops);
+
+ pr_info("%pOF: EIC registered, nr_irqs %u\n", node, MCHP_EIC_NIRQ);
+
+ return 0;
+
+clk_unprepare:
+ clk_disable_unprepare(eic->clk);
+unmap:
+ iounmap(eic->base);
+free:
+ kfree(eic);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(mchp_eic)
+IRQCHIP_MATCH("microchip,sama7g5-eic", mchp_eic_init)
+IRQCHIP_PLATFORM_DRIVER_END(mchp_eic)
+
+MODULE_DESCRIPTION("Microchip External Interrupt Controller");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea@microchip.com>");
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
new file mode 100644
index 000000000..7da18ef95
--- /dev/null
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define MAX_NUM_CHANNEL 64
+#define MAX_INPUT_MUX 256
+
+#define REG_EDGE_POL 0x00
+#define REG_PIN_03_SEL 0x04
+#define REG_PIN_47_SEL 0x08
+#define REG_FILTER_SEL 0x0c
+
+/* Used for A1-like chips */
+#define REG_PIN_A1_SEL 0x04
+/* Used for S4 chips */
+#define REG_EDGE_POL_S4 0x1c
+
+/*
+ * Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
+ * bits 24 to 31. Tests on the actual HW show that these bits are
+ * stuck at 0. Bits 8 to 15 are responsive and have the expected
+ * effect.
+ */
+#define REG_EDGE_POL_EDGE(params, x) BIT((params)->edge_single_offset + (x))
+#define REG_EDGE_POL_LOW(params, x) BIT((params)->pol_low_offset + (x))
+#define REG_BOTH_EDGE(params, x) BIT((params)->edge_both_offset + (x))
+#define REG_EDGE_POL_MASK(params, x) ( \
+ REG_EDGE_POL_EDGE(params, x) | \
+ REG_EDGE_POL_LOW(params, x) | \
+ REG_BOTH_EDGE(params, x))
+#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8)
+#define REG_FILTER_SEL_SHIFT(x) ((x) * 4)
+
+struct meson_gpio_irq_controller;
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq);
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl);
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel,
+ unsigned long hwirq);
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl);
+static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq);
+static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq);
+
+struct irq_ctl_ops {
+ void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq);
+ void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl);
+ int (*gpio_irq_set_type)(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq);
+};
+
+struct meson_gpio_irq_params {
+ unsigned int nr_hwirq;
+ unsigned int nr_channels;
+ bool support_edge_both;
+ unsigned int edge_both_offset;
+ unsigned int edge_single_offset;
+ unsigned int pol_low_offset;
+ unsigned int pin_sel_mask;
+ struct irq_ctl_ops ops;
+};
+
+#define INIT_MESON_COMMON(irqs, init, sel, type) \
+ .nr_hwirq = irqs, \
+ .ops = { \
+ .gpio_irq_init = init, \
+ .gpio_irq_sel_pin = sel, \
+ .gpio_irq_set_type = type, \
+ },
+
+#define INIT_MESON8_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \
+ meson8_gpio_irq_sel_pin, \
+ meson8_gpio_irq_set_type) \
+ .edge_single_offset = 0, \
+ .pol_low_offset = 16, \
+ .pin_sel_mask = 0xff, \
+ .nr_channels = 8, \
+
+#define INIT_MESON_A1_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
+ meson_a1_gpio_irq_sel_pin, \
+ meson8_gpio_irq_set_type) \
+ .support_edge_both = true, \
+ .edge_both_offset = 16, \
+ .edge_single_offset = 8, \
+ .pol_low_offset = 0, \
+ .pin_sel_mask = 0x7f, \
+ .nr_channels = 8, \
+
+#define INIT_MESON_S4_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
+ meson_a1_gpio_irq_sel_pin, \
+ meson_s4_gpio_irq_set_type) \
+ .support_edge_both = true, \
+ .edge_both_offset = 0, \
+ .edge_single_offset = 12, \
+ .pol_low_offset = 0, \
+ .pin_sel_mask = 0xff, \
+ .nr_channels = 12, \
+
+static const struct meson_gpio_irq_params meson8_params = {
+ INIT_MESON8_COMMON_DATA(134)
+};
+
+static const struct meson_gpio_irq_params meson8b_params = {
+ INIT_MESON8_COMMON_DATA(119)
+};
+
+static const struct meson_gpio_irq_params gxbb_params = {
+ INIT_MESON8_COMMON_DATA(133)
+};
+
+static const struct meson_gpio_irq_params gxl_params = {
+ INIT_MESON8_COMMON_DATA(110)
+};
+
+static const struct meson_gpio_irq_params axg_params = {
+ INIT_MESON8_COMMON_DATA(100)
+};
+
+static const struct meson_gpio_irq_params sm1_params = {
+ INIT_MESON8_COMMON_DATA(100)
+ .support_edge_both = true,
+ .edge_both_offset = 8,
+};
+
+static const struct meson_gpio_irq_params a1_params = {
+ INIT_MESON_A1_COMMON_DATA(62)
+};
+
+static const struct meson_gpio_irq_params s4_params = {
+ INIT_MESON_S4_COMMON_DATA(82)
+};
+
+static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
+ { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
+ { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
+ { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
+ { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
+ { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
+ { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
+ { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
+ { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
+ { .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
+ { }
+};
+
+struct meson_gpio_irq_controller {
+ const struct meson_gpio_irq_params *params;
+ void __iomem *base;
+ u32 channel_irqs[MAX_NUM_CHANNEL];
+ DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL);
+ spinlock_t lock;
+};
+
+static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
+ unsigned int reg, u32 mask, u32 val)
+{
+ unsigned long flags;
+ u32 tmp;
+
+ spin_lock_irqsave(&ctl->lock, flags);
+
+ tmp = readl_relaxed(ctl->base + reg);
+ tmp &= ~mask;
+ tmp |= val;
+ writel_relaxed(tmp, ctl->base + reg);
+
+ spin_unlock_irqrestore(&ctl->lock, flags);
+}
+
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
+{
+}
+
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq)
+{
+ unsigned int reg_offset;
+ unsigned int bit_offset;
+
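+ /* Meson8-style mux: four channels per select register, one byte per channel */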
+ reg_offset = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+ bit_offset = REG_PIN_SEL_SHIFT(channel);
+
+ meson_gpio_irq_update_bits(ctl, reg_offset,
+ ctl->params->pin_sel_mask << bit_offset,
+ hwirq << bit_offset);
+}
+
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel,
+ unsigned long hwirq)
+{
+ unsigned int reg_offset;
+ unsigned int bit_offset;
+
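+ /* A1-style mux: two channels per register, even channels in bits [15:0], odd in [31:16] */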
+ bit_offset = ((channel % 2) == 0) ? 0 : 16;
+ reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2);
+
+ meson_gpio_irq_update_bits(ctl, reg_offset,
+ ctl->params->pin_sel_mask << bit_offset,
+ hwirq << bit_offset);
+}
+
+/* A1 and later chips have a switch to enable/disable the irq lines */
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl)
+{
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(31), BIT(31));
+}
+
+static int
+meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ unsigned long hwirq,
+ u32 **channel_hwirq)
+{
+ unsigned long flags;
+ unsigned int idx;
+
+ spin_lock_irqsave(&ctl->lock, flags);
+
+ /* Find a free channel */
+ idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
+ if (idx >= ctl->params->nr_channels) {
+ spin_unlock_irqrestore(&ctl->lock, flags);
+ pr_err("No channel available\n");
+ return -ENOSPC;
+ }
+
+ /* Mark the channel as used */
+ set_bit(idx, ctl->channel_map);
+
+ spin_unlock_irqrestore(&ctl->lock, flags);
+
+ /*
+ * Setup the mux of the channel to route the signal of the pad
+ * to the appropriate input of the GIC
+ */
+ ctl->params->ops.gpio_irq_sel_pin(ctl, idx, hwirq);
+
+ /*
+ * Get the hwirq number assigned to this channel through a pointer
+ * into the channel_irqs table. The added benefit of this method is
+ * that the channel index can also be retrieved from it, using the
+ * table base.
+ */
+ *channel_hwirq = &(ctl->channel_irqs[idx]);
+
+ pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
+ hwirq, idx, **channel_hwirq);
+
+ return 0;
+}
+
+static unsigned int
+meson_gpio_irq_get_channel_idx(struct meson_gpio_irq_controller *ctl,
+ u32 *channel_hwirq)
+{
+ return channel_hwirq - ctl->channel_irqs;
+}
+
+static void
+meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
+ u32 *channel_hwirq)
+{
+ unsigned int idx;
+
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+ clear_bit(idx, ctl->channel_map);
+}
+
+static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq)
+{
+ u32 val = 0;
+ unsigned int idx;
+ const struct meson_gpio_irq_params *params;
+
+ params = ctl->params;
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+
+ /*
+ * The controller has a filter block to operate in either LEVEL or
+ * EDGE mode before the signal is sent to the GIC. To enable LEVEL_LOW and
+ * EDGE_FALLING support (which the GIC does not support), the filter
+ * block is also able to invert the input signal it gets before
+ * providing it to the GIC.
+ */
+ type &= IRQ_TYPE_SENSE_MASK;
+
+ /*
+ * Newer controllers support the EDGE_BOTH trigger. This setting
+ * takes precedence over the other edge/polarity settings.
+ */
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ if (!params->support_edge_both)
+ return -EINVAL;
+
+ val |= REG_BOTH_EDGE(params, idx);
+ } else {
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ val |= REG_EDGE_POL_EDGE(params, idx);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+ val |= REG_EDGE_POL_LOW(params, idx);
+ }
+
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ REG_EDGE_POL_MASK(params, idx), val);
+
+ return 0;
+}
+
+/*
+ * GPIO irq related registers for S4
+ * -PADCTRL_GPIO_IRQ_CTRL0
+ * bit[31]: enable/disable all the irq lines
+ * bit[12-23]: single edge trigger
+ * bit[0-11]: polarity trigger
+ *
+ * -PADCTRL_GPIO_IRQ_CTRL[X]
+ * bit[0-6]:  7 bits to choose gpio source for irq line 2*[X] - 2
+ * bit[16-22]: 7 bits to choose gpio source for irq line 2*[X] - 1
+ * where X = 1-6
+ *
+ * -PADCTRL_GPIO_IRQ_CTRL[7]
+ * bit[0-11]: both edge trigger
+ */
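+/*
+ * Worked example (for illustration, derived from the layout above): irq
+ * line 5 is muxed via bits [16-22] of PADCTRL_GPIO_IRQ_CTRL[3] (since
+ * 2*3 - 1 = 5); its single-edge bit is bit 17 and its polarity bit is
+ * bit 5 of CTRL0, while the both-edge trigger uses bit 5 of CTRL[7].
+ */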
+static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
+ unsigned int type, u32 *channel_hwirq)
+{
+ u32 val = 0;
+ unsigned int idx;
+
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+
+ type &= IRQ_TYPE_SENSE_MASK;
+
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0);
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ val |= BIT(ctl->params->edge_both_offset + idx);
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4,
+ BIT(ctl->params->edge_both_offset + idx), val);
+ return 0;
+ }
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+ val |= BIT(ctl->params->pol_low_offset + idx);
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ val |= BIT(ctl->params->edge_single_offset + idx);
+
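+ /* Program the polarity (bit idx) and single-edge (bit 12 + idx) fields in CTRL0 */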
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ BIT(idx) | BIT(12 + idx), val);
+ return 0;
+};
+
+static unsigned int meson_gpio_irq_type_output(unsigned int type)
+{
+ unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
+
+ type &= ~IRQ_TYPE_SENSE_MASK;
+
+ /*
+ * The polarity of the signal provided to the GIC should always
+ * be high.
+ */
+ if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ type |= IRQ_TYPE_LEVEL_HIGH;
+ else
+ type |= IRQ_TYPE_EDGE_RISING;
+
+ return type;
+}
+
+static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct meson_gpio_irq_controller *ctl = data->domain->host_data;
+ u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
+ int ret;
+
+ ret = ctl->params->ops.gpio_irq_set_type(ctl, type, channel_hwirq);
+ if (ret)
+ return ret;
+
+ return irq_chip_set_type_parent(data,
+ meson_gpio_irq_type_output(type));
+}
+
+static struct irq_chip meson_gpio_irq_chip = {
+ .name = "meson-gpio-irqchip",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = meson_gpio_irq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int meson_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int meson_gpio_irq_allocate_gic_irq(struct irq_domain *domain,
+ unsigned int virq,
+ u32 hwirq,
+ unsigned int type)
+{
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0; /* SPI */
+ fwspec.param[1] = hwirq;
+ fwspec.param[2] = meson_gpio_irq_type_output(type);
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+}
+
+static int meson_gpio_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct meson_gpio_irq_controller *ctl = domain->host_data;
+ unsigned long hwirq;
+ u32 *channel_hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = meson_gpio_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ ret = meson_gpio_irq_request_channel(ctl, hwirq, &channel_hwirq);
+ if (ret)
+ return ret;
+
+ ret = meson_gpio_irq_allocate_gic_irq(domain, virq,
+ *channel_hwirq, type);
+ if (ret < 0) {
+ pr_err("failed to allocate gic irq %u\n", *channel_hwirq);
+ meson_gpio_irq_release_channel(ctl, channel_hwirq);
+ return ret;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &meson_gpio_irq_chip, channel_hwirq);
+
+ return 0;
+}
+
+static void meson_gpio_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct meson_gpio_irq_controller *ctl = domain->host_data;
+ struct irq_data *irq_data;
+ u32 *channel_hwirq;
+
+ if (WARN_ON(nr_irqs != 1))
+ return;
+
+ irq_domain_free_irqs_parent(domain, virq, 1);
+
+ irq_data = irq_domain_get_irq_data(domain, virq);
+ channel_hwirq = irq_data_get_irq_chip_data(irq_data);
+
+ meson_gpio_irq_release_channel(ctl, channel_hwirq);
+}
+
+static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
+ .alloc = meson_gpio_irq_domain_alloc,
+ .free = meson_gpio_irq_domain_free,
+ .translate = meson_gpio_irq_domain_translate,
+};
+
+static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_irq_controller *ctl)
+{
+ const struct of_device_id *match;
+ int ret;
+
+ match = of_match_node(meson_irq_gpio_matches, node);
+ if (!match)
+ return -ENODEV;
+
+ ctl->params = match->data;
+
+ ret = of_property_read_variable_u32_array(node,
+ "amlogic,channel-interrupts",
+ ctl->channel_irqs,
+ ctl->params->nr_channels,
+ ctl->params->nr_channels);
+ if (ret < 0) {
+ pr_err("can't get %d channel interrupts\n", ctl->params->nr_channels);
+ return ret;
+ }
+
+ ctl->params->ops.gpio_irq_init(ctl);
+
+ return 0;
+}
+
+static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct meson_gpio_irq_controller *ctl;
+ int ret;
+
+ if (!parent) {
+ pr_err("missing parent interrupt node\n");
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("unable to obtain parent domain\n");
+ return -ENXIO;
+ }
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ spin_lock_init(&ctl->lock);
+
+ ctl->base = of_iomap(node, 0);
+ if (!ctl->base) {
+ ret = -ENOMEM;
+ goto free_ctl;
+ }
+
+ ret = meson_gpio_irq_parse_dt(node, ctl);
+ if (ret)
+ goto free_channel_irqs;
+
+ domain = irq_domain_create_hierarchy(parent_domain, 0,
+ ctl->params->nr_hwirq,
+ of_node_to_fwnode(node),
+ &meson_gpio_irq_domain_ops,
+ ctl);
+ if (!domain) {
+ pr_err("failed to add domain\n");
+ ret = -ENODEV;
+ goto free_channel_irqs;
+ }
+
+ pr_info("%d to %d gpio interrupt mux initialized\n",
+ ctl->params->nr_hwirq, ctl->params->nr_channels);
+
+ return 0;
+
+free_channel_irqs:
+ iounmap(ctl->base);
+free_ctl:
+ kfree(ctl);
+
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(meson_gpio_intc)
+IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc)
+
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:meson-gpio-intc");
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
new file mode 100644
index 000000000..0c7ae71a0
--- /dev/null
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * Copyright (C) 2001 Ralf Baechle
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Author: Maciej W. Rozycki <macro@mips.com>
+ *
+ * This file defines the irq handler for MIPS CPU interrupts.
+ */
+
+/*
+ * Almost all MIPS CPUs define 8 interrupt sources. They are typically
+ * level triggered (i.e., they cannot be cleared from the CPU; they must be
+ * cleared at the device).
+ *
+ * The first two are software interrupts (i.e. not exposed as pins) which
+ * may be used for IPIs in multi-threaded single-core systems.
+ *
+ * The last one is usually the CPU timer interrupt if the counter register
+ * is present, or for old CPUs with an external FPU by convention it's the
+ * FPU exception interrupt.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/setup.h>
+
+static struct irq_domain *irq_domain;
+static struct irq_domain *ipi_domain;
+
+static inline void unmask_mips_irq(struct irq_data *d)
+{
+ set_c0_status(IE_SW0 << d->hwirq);
+ irq_enable_hazard();
+}
+
+static inline void mask_mips_irq(struct irq_data *d)
+{
+ clear_c0_status(IE_SW0 << d->hwirq);
+ irq_disable_hazard();
+}
+
+static struct irq_chip mips_cpu_irq_controller = {
+ .name = "MIPS",
+ .irq_ack = mask_mips_irq,
+ .irq_mask = mask_mips_irq,
+ .irq_mask_ack = mask_mips_irq,
+ .irq_unmask = unmask_mips_irq,
+ .irq_eoi = unmask_mips_irq,
+ .irq_disable = mask_mips_irq,
+ .irq_enable = unmask_mips_irq,
+};
+
+/*
+ * Basically the same as above but taking care of all the MT stuff
+ */
+
+static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
+{
+ unsigned int vpflags = dvpe();
+
+ clear_c0_cause(C_SW0 << d->hwirq);
+ evpe(vpflags);
+ unmask_mips_irq(d);
+ return 0;
+}
+
+/*
+ * While we ack the interrupt, interrupts are disabled and thus we don't need
+ * to deal with concurrency issues. Same for mips_cpu_irq_end.
+ */
+static void mips_mt_cpu_irq_ack(struct irq_data *d)
+{
+ unsigned int vpflags = dvpe();
+ clear_c0_cause(C_SW0 << d->hwirq);
+ evpe(vpflags);
+ mask_mips_irq(d);
+}
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
+
+static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ int vpflags;
+
+ local_irq_save(flags);
+
+ /* We can only send IPIs to VPEs within the local core */
+ WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu));
+
+ vpflags = dvpe();
+ settc(cpu_vpe_id(&cpu_data[cpu]));
+ write_vpe_c0_cause(read_vpe_c0_cause() | (C_SW0 << hwirq));
+ evpe(vpflags);
+
+ local_irq_restore(flags);
+}
+
+#endif /* CONFIG_GENERIC_IRQ_IPI */
+
+static struct irq_chip mips_mt_cpu_irq_controller = {
+ .name = "MIPS",
+ .irq_startup = mips_mt_cpu_irq_startup,
+ .irq_ack = mips_mt_cpu_irq_ack,
+ .irq_mask = mask_mips_irq,
+ .irq_mask_ack = mips_mt_cpu_irq_ack,
+ .irq_unmask = unmask_mips_irq,
+ .irq_eoi = unmask_mips_irq,
+ .irq_disable = mask_mips_irq,
+ .irq_enable = unmask_mips_irq,
+#ifdef CONFIG_GENERIC_IRQ_IPI
+ .ipi_send_single = mips_mt_send_ipi,
+#endif
+};
+
+asmlinkage void __weak plat_irq_dispatch(void)
+{
+ unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
+ int irq;
+
+ if (!pending) {
+ spurious_interrupt();
+ return;
+ }
+
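+ /* Shift out the Cause.IP field so that bit 0 corresponds to hw interrupt 0 */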
+ pending >>= CAUSEB_IP;
+ while (pending) {
+ struct irq_domain *d;
+
+ irq = fls(pending) - 1;
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2)
+ d = ipi_domain;
+ else
+ d = irq_domain;
+
+ do_domain_IRQ(d, irq);
+ pending &= ~BIT(irq);
+ }
+}
+
+static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip;
+
+ if (hw < 2 && cpu_has_mipsmt) {
+ /* Software interrupts are used for MT/CMT IPI */
+ chip = &mips_mt_cpu_irq_controller;
+ } else {
+ chip = &mips_cpu_irq_controller;
+ }
+
+ if (cpu_has_vint)
+ set_vi_handler(hw, plat_irq_dispatch);
+
+ irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
+ .map = mips_cpu_intc_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
+
+struct cpu_ipi_domain_state {
+ DECLARE_BITMAP(allocated, 2);
+};
+
+static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct cpu_ipi_domain_state *state = domain->host_data;
+ unsigned int i, hwirq;
+ int ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ hwirq = find_first_zero_bit(state->allocated, 2);
+ if (hwirq == 2)
+ return -EBUSY;
+ bitmap_set(state->allocated, hwirq, 1);
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq,
+ &mips_mt_cpu_irq_controller,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+ &mips_mt_cpu_irq_controller,
+ NULL);
+
+ if (ret)
+ return ret;
+
+ ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mips_cpu_ipi_match(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ bool is_ipi;
+
+ switch (bus_token) {
+ case DOMAIN_BUS_IPI:
+ is_ipi = d->bus_token == bus_token;
+ return (!node || (to_of_node(d->fwnode) == node)) && is_ipi;
+ default:
+ return 0;
+ }
+}
+
+static const struct irq_domain_ops mips_cpu_ipi_chip_ops = {
+ .alloc = mips_cpu_ipi_alloc,
+ .match = mips_cpu_ipi_match,
+};
+
+static void mips_cpu_register_ipi_domain(struct device_node *of_node)
+{
+ struct cpu_ipi_domain_state *ipi_domain_state;
+
+ ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
+ ipi_domain = irq_domain_add_hierarchy(irq_domain,
+ IRQ_DOMAIN_FLAG_IPI_SINGLE,
+ 2, of_node,
+ &mips_cpu_ipi_chip_ops,
+ ipi_domain_state);
+ if (!ipi_domain)
+ panic("Failed to add MIPS CPU IPI domain");
+ irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
+}
+
+#else /* !CONFIG_GENERIC_IRQ_IPI */
+
+static inline void mips_cpu_register_ipi_domain(struct device_node *of_node) {}
+
+#endif /* !CONFIG_GENERIC_IRQ_IPI */
+
+static void __init __mips_cpu_irq_init(struct device_node *of_node)
+{
+ /* Mask interrupts. */
+ clear_c0_status(ST0_IM);
+ clear_c0_cause(CAUSEF_IP);
+
+ irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
+ &mips_cpu_intc_irq_domain_ops,
+ NULL);
+ if (!irq_domain)
+ panic("Failed to add irqdomain for MIPS CPU");
+
+ /*
+ * Only proceed to register the software interrupt IPI implementation
+ * for CPUs which implement the MIPS MT (multi-threading) ASE.
+ */
+ if (cpu_has_mipsmt)
+ mips_cpu_register_ipi_domain(of_node);
+}
+
+void __init mips_cpu_irq_init(void)
+{
+ __mips_cpu_irq_init(NULL);
+}
+
+int __init mips_cpu_irq_of_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ __mips_cpu_irq_init(of_node);
+ return 0;
+}
+IRQCHIP_DECLARE(cpu_intc, "mti,cpu-interrupt-controller", mips_cpu_irq_of_init);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
new file mode 100644
index 000000000..3a33aefec
--- /dev/null
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -0,0 +1,855 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "irq-mips-gic: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/clocksource.h>
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/mips-cps.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
+#define GIC_MAX_INTRS 256
+#define GIC_MAX_LONGS BITS_TO_LONGS(GIC_MAX_INTRS)
+
+/* Add 2 to convert GIC CPU pin to core interrupt */
+#define GIC_CPU_PIN_OFFSET 2
+
+/* An interrupt mapped to pin X causes the GIC to generate vector (X + 1). */
+#define GIC_PIN_TO_VEC_OFFSET 1
+
+/* Convert between local/shared IRQ number and GIC HW IRQ number. */
+#define GIC_LOCAL_HWIRQ_BASE 0
+#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
+#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
+#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
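+/*
+ * For example (assuming GIC_NUM_LOCAL_INTRS is 7): local interrupt 3 is
+ * hwirq 3 in the domain, while shared interrupt 3 is hwirq 10.
+ */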
+
+void __iomem *mips_gic_base;
+
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
+
+static DEFINE_RAW_SPINLOCK(gic_lock);
+static struct irq_domain *gic_irq_domain;
+static int gic_shared_intrs;
+static unsigned int gic_cpu_pin;
+static unsigned int timer_cpu_pin;
+static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
+static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
+static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+#endif /* CONFIG_GENERIC_IRQ_IPI */
+
+static struct gic_all_vpes_chip_data {
+ u32 map;
+ bool mask;
+} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
+
+static void gic_clear_pcpu_masks(unsigned int intr)
+{
+ unsigned int i;
+
+ /* Clear the interrupt's bit in all pcpu_masks */
+ for_each_possible_cpu(i)
+ clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
+}
+
+static bool gic_local_irq_is_routable(int intr)
+{
+ u32 vpe_ctl;
+
+ /* All local interrupts are routable in EIC mode. */
+ if (cpu_has_veic)
+ return true;
+
+ vpe_ctl = read_gic_vl_ctl();
+ switch (intr) {
+ case GIC_LOCAL_INT_TIMER:
+ return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
+ case GIC_LOCAL_INT_PERFCTR:
+ return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
+ case GIC_LOCAL_INT_FDC:
+ return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
+ case GIC_LOCAL_INT_SWINT0:
+ case GIC_LOCAL_INT_SWINT1:
+ return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
+ default:
+ return true;
+ }
+}
+
+static void gic_bind_eic_interrupt(int irq, int set)
+{
+ /* Convert irq vector # to hw int # */
+ irq -= GIC_PIN_TO_VEC_OFFSET;
+
+ /* Set irq to use shadow set */
+ write_gic_vl_eic_shadow_set(irq, set);
+}
+
+static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
+{
+ irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
+
+ write_gic_wedge(GIC_WEDGE_RW | hwirq);
+}
+
+int gic_get_c0_compare_int(void)
+{
+ if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
+ return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+ return irq_create_mapping(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
+}
+
+int gic_get_c0_perfcount_int(void)
+{
+ if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
+ /* Is the performance counter shared with the timer? */
+ if (cp0_perfcount_irq < 0)
+ return -1;
+ return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ }
+ return irq_create_mapping(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
+}
+
+int gic_get_c0_fdc_int(void)
+{
+ if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
+ /* Is the FDC IRQ even present? */
+ if (cp0_fdc_irq < 0)
+ return -1;
+ return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+ }
+
+ return irq_create_mapping(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
+}
+
+static void gic_handle_shared_int(bool chained)
+{
+ unsigned int intr;
+ unsigned long *pcpu_mask;
+ DECLARE_BITMAP(pending, GIC_MAX_INTRS);
+
+ /* Get per-cpu bitmaps */
+ pcpu_mask = this_cpu_ptr(pcpu_masks);
+
+ if (mips_cm_is64)
+ __ioread64_copy(pending, addr_gic_pend(),
+ DIV_ROUND_UP(gic_shared_intrs, 64));
+ else
+ __ioread32_copy(pending, addr_gic_pend(),
+ DIV_ROUND_UP(gic_shared_intrs, 32));
+
+ bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
+
+ for_each_set_bit(intr, pending, gic_shared_intrs) {
+ if (chained)
+ generic_handle_domain_irq(gic_irq_domain,
+ GIC_SHARED_TO_HWIRQ(intr));
+ else
+ do_domain_IRQ(gic_irq_domain,
+ GIC_SHARED_TO_HWIRQ(intr));
+ }
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+ unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
+
+ write_gic_rmask(intr);
+ gic_clear_pcpu_masks(intr);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
+ unsigned int cpu;
+
+ write_gic_smask(intr);
+
+ gic_clear_pcpu_masks(intr);
+ cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
+ set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
+}
+
+static void gic_ack_irq(struct irq_data *d)
+{
+ unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+
+ write_gic_wedge(irq);
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int irq, pol, trig, dual;
+ unsigned long flags;
+
+ irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+
+ raw_spin_lock_irqsave(&gic_lock, flags);
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ pol = GIC_POL_FALLING_EDGE;
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_SINGLE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ pol = GIC_POL_RISING_EDGE;
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_SINGLE;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ pol = 0; /* Doesn't matter */
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_DUAL;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ pol = GIC_POL_ACTIVE_LOW;
+ trig = GIC_TRIG_LEVEL;
+ dual = GIC_DUAL_SINGLE;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ default:
+ pol = GIC_POL_ACTIVE_HIGH;
+ trig = GIC_TRIG_LEVEL;
+ dual = GIC_DUAL_SINGLE;
+ break;
+ }
+
+ change_gic_pol(irq, pol);
+ change_gic_trig(irq, trig);
+ change_gic_dual(irq, dual);
+
+ if (trig == GIC_TRIG_EDGE)
+ irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
+ handle_edge_irq, NULL);
+ else
+ irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
+ handle_level_irq, NULL);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+ unsigned long flags;
+ unsigned int cpu;
+
+ cpu = cpumask_first_and(cpumask, cpu_online_mask);
+ if (cpu >= NR_CPUS)
+ return -EINVAL;
+
+ /* Assumption: cpumask refers to a single CPU */
+ raw_spin_lock_irqsave(&gic_lock, flags);
+
+ /* Re-route this IRQ */
+ write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
+
+ /* Update the pcpu_masks */
+ gic_clear_pcpu_masks(irq);
+ if (read_gic_mask(irq))
+ set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
+static struct irq_chip gic_level_irq_controller = {
+ .name = "MIPS GIC",
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_set_type = gic_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gic_set_affinity,
+#endif
+};
+
+static struct irq_chip gic_edge_irq_controller = {
+ .name = "MIPS GIC",
+ .irq_ack = gic_ack_irq,
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_set_type = gic_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gic_set_affinity,
+#endif
+ .ipi_send_single = gic_send_ipi,
+};
+
+static void gic_handle_local_int(bool chained)
+{
+ unsigned long pending, masked;
+ unsigned int intr;
+
+ pending = read_gic_vl_pend();
+ masked = read_gic_vl_mask();
+
+ bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
+
+ for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
+ if (chained)
+ generic_handle_domain_irq(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(intr));
+ else
+ do_domain_IRQ(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(intr));
+ }
+}
+
+static void gic_mask_local_irq(struct irq_data *d)
+{
+ int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+
+ write_gic_vl_rmask(BIT(intr));
+}
+
+static void gic_unmask_local_irq(struct irq_data *d)
+{
+ int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+
+ write_gic_vl_smask(BIT(intr));
+}
+
+static struct irq_chip gic_local_irq_controller = {
+ .name = "MIPS GIC Local",
+ .irq_mask = gic_mask_local_irq,
+ .irq_unmask = gic_unmask_local_irq,
+};
+
+static void gic_mask_local_irq_all_vpes(struct irq_data *d)
+{
+ struct gic_all_vpes_chip_data *cd;
+ unsigned long flags;
+ int intr, cpu;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+ cd->mask = false;
+
+ raw_spin_lock_irqsave(&gic_lock, flags);
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+ write_gic_vo_rmask(BIT(intr));
+ }
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
+}
+
+static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
+{
+ struct gic_all_vpes_chip_data *cd;
+ unsigned long flags;
+ int intr, cpu;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+ cd->mask = true;
+
+ raw_spin_lock_irqsave(&gic_lock, flags);
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+ write_gic_vo_smask(BIT(intr));
+ }
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
+}
+
+static void gic_all_vpes_irq_cpu_online(void)
+{
+ static const unsigned int local_intrs[] = {
+ GIC_LOCAL_INT_TIMER,
+ GIC_LOCAL_INT_PERFCTR,
+ GIC_LOCAL_INT_FDC,
+ };
+ unsigned long flags;
+ int i;
+
+ raw_spin_lock_irqsave(&gic_lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
+ unsigned int intr = local_intrs[i];
+ struct gic_all_vpes_chip_data *cd;
+
+ if (!gic_local_irq_is_routable(intr))
+ continue;
+ cd = &gic_all_vpes_chip_data[intr];
+ write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
+ if (cd->mask)
+ write_gic_vl_smask(BIT(intr));
+ }
+
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
+}
+
+static struct irq_chip gic_all_vpes_local_irq_controller = {
+ .name = "MIPS GIC Local",
+ .irq_mask = gic_mask_local_irq_all_vpes,
+ .irq_unmask = gic_unmask_local_irq_all_vpes,
+};
+
+static void __gic_irq_dispatch(void)
+{
+ gic_handle_local_int(false);
+ gic_handle_shared_int(false);
+}
+
+static void gic_irq_dispatch(struct irq_desc *desc)
+{
+ gic_handle_local_int(true);
+ gic_handle_shared_int(true);
+}
+
+static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw, unsigned int cpu)
+{
+ int intr = GIC_HWIRQ_TO_SHARED(hw);
+ struct irq_data *data;
+ unsigned long flags;
+
+ data = irq_get_irq_data(virq);
+
+ raw_spin_lock_irqsave(&gic_lock, flags);
+ write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ if (intsize != 3)
+ return -EINVAL;
+
+ if (intspec[0] == GIC_SHARED)
+ *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
+ else if (intspec[0] == GIC_LOCAL)
+ *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
+ else
+ return -EINVAL;
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct gic_all_vpes_chip_data *cd;
+ unsigned long flags;
+ unsigned int intr;
+ int err, cpu;
+ u32 map;
+
+ if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+#ifdef CONFIG_GENERIC_IRQ_IPI
+ /* verify that shared irqs don't conflict with an IPI irq */
+ if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
+ return -EBUSY;
+#endif /* CONFIG_GENERIC_IRQ_IPI */
+
+ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+ &gic_level_irq_controller,
+ NULL);
+ if (err)
+ return err;
+
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+ return gic_shared_irq_domain_map(d, virq, hwirq, 0);
+ }
+
+ intr = GIC_HWIRQ_TO_LOCAL(hwirq);
+ map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
+
+ /*
+ * If adding support for more per-cpu interrupts, keep the
+ * array in gic_all_vpes_irq_cpu_online() in sync.
+ */
+ switch (intr) {
+ case GIC_LOCAL_INT_TIMER:
+ /* CONFIG_MIPS_CMP workaround (see gic_of_init) */
+ map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
+ fallthrough;
+ case GIC_LOCAL_INT_PERFCTR:
+ case GIC_LOCAL_INT_FDC:
+ /*
+ * HACK: These are all really percpu interrupts, but
+ * the rest of the MIPS kernel code does not use the
+ * percpu IRQ API for them.
+ */
+ cd = &gic_all_vpes_chip_data[intr];
+ cd->map = map;
+ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+ &gic_all_vpes_local_irq_controller,
+ cd);
+ if (err)
+ return err;
+
+ irq_set_handler(virq, handle_percpu_irq);
+ break;
+
+ default:
+ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+ &gic_local_irq_controller,
+ NULL);
+ if (err)
+ return err;
+
+ irq_set_handler(virq, handle_percpu_devid_irq);
+ irq_set_percpu_devid(virq);
+ break;
+ }
+
+ if (!gic_local_irq_is_routable(intr))
+ return -EPERM;
+
+ raw_spin_lock_irqsave(&gic_lock, flags);
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+ write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
+ }
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
+}
+
+static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+
+ if (fwspec->param[0] == GIC_SHARED)
+ hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
+ else
+ hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
+
+ return gic_irq_domain_map(d, virq, hwirq);
+}
+
+void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
+{
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+ .xlate = gic_irq_domain_xlate,
+ .alloc = gic_irq_domain_alloc,
+ .free = gic_irq_domain_free,
+ .map = gic_irq_domain_map,
+};
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
+
+static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ /*
+ * There's nothing to translate here. hwirq is dynamically allocated and
+ * the irq type is always edge triggered.
+ */
+ *out_hwirq = 0;
+ *out_type = IRQ_TYPE_EDGE_RISING;
+
+ return 0;
+}
+
+static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct cpumask *ipimask = arg;
+ irq_hw_number_t hwirq, base_hwirq;
+ int cpu, ret, i;
+
+ base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
+ if (base_hwirq == gic_shared_intrs)
+ return -ENOMEM;
+
+ /* check that we have enough space */
+ for (i = base_hwirq; i < nr_irqs; i++) {
+ if (!test_bit(i, ipi_available))
+ return -EBUSY;
+ }
+ bitmap_clear(ipi_available, base_hwirq, nr_irqs);
+
+ /* map the hwirq for each cpu consecutively */
+ i = 0;
+ for_each_cpu(cpu, ipimask) {
+ hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
+
+ ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
+ &gic_edge_irq_controller,
+ NULL);
+ if (ret)
+ goto error;
+
+ ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
+ &gic_edge_irq_controller,
+ NULL);
+ if (ret)
+ goto error;
+
+ ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
+ if (ret)
+ goto error;
+
+ ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
+ if (ret)
+ goto error;
+
+ i++;
+ }
+
+ return 0;
+error:
+ bitmap_set(ipi_available, base_hwirq, nr_irqs);
+ return ret;
+}
+
+static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ irq_hw_number_t base_hwirq;
+ struct irq_data *data;
+
+ data = irq_get_irq_data(virq);
+ if (!data)
+ return;
+
+ base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
+ bitmap_set(ipi_available, base_hwirq, nr_irqs);
+}
+
+static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ bool is_ipi;
+
+ switch (bus_token) {
+ case DOMAIN_BUS_IPI:
+ is_ipi = d->bus_token == bus_token;
+ return (!node || to_of_node(d->fwnode) == node) && is_ipi;
+ break;
+ default:
+ return 0;
+ }
+}
+
+static const struct irq_domain_ops gic_ipi_domain_ops = {
+ .xlate = gic_ipi_domain_xlate,
+ .alloc = gic_ipi_domain_alloc,
+ .free = gic_ipi_domain_free,
+ .match = gic_ipi_domain_match,
+};
+
+static int gic_register_ipi_domain(struct device_node *node)
+{
+ struct irq_domain *gic_ipi_domain;
+ unsigned int v[2], num_ipis;
+
+ gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+ GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+ node, &gic_ipi_domain_ops, NULL);
+ if (!gic_ipi_domain) {
+ pr_err("Failed to add IPI domain");
+ return -ENXIO;
+ }
+
+ irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
+
+ if (node &&
+ !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
+ bitmap_set(ipi_resrv, v[0], v[1]);
+ } else {
+ /*
+ * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
+ * meeting the requirements of arch/mips SMP.
+ */
+ num_ipis = 2 * num_possible_cpus();
+ bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
+ }
+
+ bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+
+ return 0;
+}
+
+#else /* !CONFIG_GENERIC_IRQ_IPI */
+
+static inline int gic_register_ipi_domain(struct device_node *node)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_GENERIC_IRQ_IPI */
+
+static int gic_cpu_startup(unsigned int cpu)
+{
+ /* Enable or disable EIC */
+ change_gic_vl_ctl(GIC_VX_CTL_EIC,
+ cpu_has_veic ? GIC_VX_CTL_EIC : 0);
+
+ /* Clear all local IRQ masks (ie. disable all local interrupts) */
+ write_gic_vl_rmask(~0);
+
+ /* Enable desired interrupts */
+ gic_all_vpes_irq_cpu_online();
+
+ return 0;
+}
+
+static int __init gic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ unsigned int cpu_vec, i, gicconfig;
+ unsigned long reserved;
+ phys_addr_t gic_base;
+ struct resource res;
+ size_t gic_len;
+ int ret;
+
+ /* Find the first available CPU vector. */
+ i = 0;
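+ /* Vectors 0 and 1 are the software interrupts and are always reserved */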
+ reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
+ while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
+ i++, &cpu_vec))
+ reserved |= BIT(cpu_vec);
+
+ cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
+ if (cpu_vec == hweight_long(ST0_IM)) {
+ pr_err("No CPU vectors available\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ /*
+ * Probe the CM for the GIC base address if not specified
+ * in the device-tree.
+ */
+ if (mips_cm_present()) {
+ gic_base = read_gcr_gic_base() &
+ ~CM_GCR_GIC_BASE_GICEN;
+ gic_len = 0x20000;
+ pr_warn("Using inherited base address %pa\n",
+ &gic_base);
+ } else {
+ pr_err("Failed to get memory range\n");
+ return -ENODEV;
+ }
+ } else {
+ gic_base = res.start;
+ gic_len = resource_size(&res);
+ }
+
+ if (mips_cm_present()) {
+ write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
+ /* Ensure GIC region is enabled before trying to access it */
+ __sync();
+ }
+
+ mips_gic_base = ioremap(gic_base, gic_len);
+ if (!mips_gic_base) {
+ pr_err("Failed to ioremap gic_base\n");
+ return -ENOMEM;
+ }
+
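+ /* GIC_CONFIG.NUMINTERRUPTS encodes (number of shared interrupts / 8) - 1 */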
+ gicconfig = read_gic_config();
+ gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig);
+ gic_shared_intrs = (gic_shared_intrs + 1) * 8;
+
+ if (cpu_has_veic) {
+ /* Always use vector 1 in EIC mode */
+ gic_cpu_pin = 0;
+ timer_cpu_pin = gic_cpu_pin;
+ set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
+ __gic_irq_dispatch);
+ } else {
+ gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
+ gic_irq_dispatch);
+ /*
+ * With the CMP implementation of SMP (deprecated), other CPUs
+ * are started by the bootloader and put into a timer based
+ * waiting poll loop. We must not re-route those CPUs' local
+ * timer interrupts as the wait instruction will never finish,
+ * so just handle whatever CPU interrupt it is routed to by
+ * default.
+ *
+ * This workaround should be removed when CMP support is
+ * dropped.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMP) &&
+ gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
+ timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
+ GIC_CPU_PIN_OFFSET +
+ timer_cpu_pin,
+ gic_irq_dispatch);
+ } else {
+ timer_cpu_pin = gic_cpu_pin;
+ }
+ }
+
+ gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
+ gic_shared_intrs, 0,
+ &gic_irq_domain_ops, NULL);
+ if (!gic_irq_domain) {
+ pr_err("Failed to add IRQ domain");
+ return -ENXIO;
+ }
+
+ ret = gic_register_ipi_domain(node);
+ if (ret)
+ return ret;
+
+ board_bind_eic_interrupt = &gic_bind_eic_interrupt;
+
+ /* Setup defaults */
+ for (i = 0; i < gic_shared_intrs; i++) {
+ change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
+ change_gic_trig(i, GIC_TRIG_LEVEL);
+ write_gic_rmask(i);
+ }
+
+ return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ "irqchip/mips/gic:starting",
+ gic_cpu_startup, NULL);
+}
+IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
new file mode 100644
index 000000000..83455ca72
--- /dev/null
+++ b/drivers/irqchip/irq-mmp.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/arch/arm/mach-mmp/irq.c
+ *
+ * Generic IRQ handling, GPIO IRQ demultiplexing, etc.
+ * Copyright (C) 2008 - 2012 Marvell Technology Group Ltd.
+ *
+ * Author: Bin Yang <bin.yang@marvell.com>
+ * Haojian Zhuang <haojian.zhuang@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+#include <asm/hardirq.h>
+
+#define MAX_ICU_NR 16
+
+#define PJ1_INT_SEL 0x10c
+#define PJ4_INT_SEL 0x104
+
+/* bit fields in PJ1_INT_SEL and PJ4_INT_SEL */
+#define SEL_INT_PENDING (1 << 6)
+#define SEL_INT_NUM_MASK 0x3f
+
+#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
+#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
+
+struct icu_chip_data {
+ int nr_irqs;
+ unsigned int virq_base;
+ unsigned int cascade_irq;
+ void __iomem *reg_status;
+ void __iomem *reg_mask;
+ unsigned int conf_enable;
+ unsigned int conf_disable;
+ unsigned int conf_mask;
+ unsigned int conf2_mask;
+ unsigned int clr_mfp_irq_base;
+ unsigned int clr_mfp_hwirq;
+ struct irq_domain *domain;
+};
+
+struct mmp_intc_conf {
+ unsigned int conf_enable;
+ unsigned int conf_disable;
+ unsigned int conf_mask;
+ unsigned int conf2_mask;
+};
+
+static void __iomem *mmp_icu_base;
+static void __iomem *mmp_icu2_base;
+static struct icu_chip_data icu_data[MAX_ICU_NR];
+static int max_icu_nr;
+
+extern void mmp2_clear_pmic_int(void);
+
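+/*
+ * The main ICU (icu_data[0]) has a per-interrupt configuration register:
+ * masking clears its routing/enable bits. The cascaded mux ICUs instead
+ * share a single mask register with one bit per interrupt.
+ */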
+static void icu_mask_ack_irq(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+ int hwirq;
+ u32 r;
+
+ hwirq = d->irq - data->virq_base;
+ if (data == &icu_data[0]) {
+ r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+ r &= ~data->conf_mask;
+ r |= data->conf_disable;
+ writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+ } else {
+#ifdef CONFIG_CPU_MMP2
+ if ((data->virq_base == data->clr_mfp_irq_base)
+ && (hwirq == data->clr_mfp_hwirq))
+ mmp2_clear_pmic_int();
+#endif
+ r = readl_relaxed(data->reg_mask) | (1 << hwirq);
+ writel_relaxed(r, data->reg_mask);
+ }
+}
+
+static void icu_mask_irq(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+ int hwirq;
+ u32 r;
+
+ hwirq = d->irq - data->virq_base;
+ if (data == &icu_data[0]) {
+ r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+ r &= ~data->conf_mask;
+ r |= data->conf_disable;
+ writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+
+ if (data->conf2_mask) {
+ /*
+ * ICU1 (above) only controls PJ4 MP1; if using SMP,
+ * we need to also mask the MP2 and MM cores via ICU2.
+ */
+ r = readl_relaxed(mmp_icu2_base + (hwirq << 2));
+ r &= ~data->conf2_mask;
+ writel_relaxed(r, mmp_icu2_base + (hwirq << 2));
+ }
+ } else {
+ r = readl_relaxed(data->reg_mask) | (1 << hwirq);
+ writel_relaxed(r, data->reg_mask);
+ }
+}
+
+static void icu_unmask_irq(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+ int hwirq;
+ u32 r;
+
+ hwirq = d->irq - data->virq_base;
+ if (data == &icu_data[0]) {
+ r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+ r &= ~data->conf_mask;
+ r |= data->conf_enable;
+ writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+ } else {
+ r = readl_relaxed(data->reg_mask) & ~(1 << hwirq);
+ writel_relaxed(r, data->reg_mask);
+ }
+}
+
+struct irq_chip icu_irq_chip = {
+ .name = "icu_irq",
+ .irq_mask = icu_mask_irq,
+ .irq_mask_ack = icu_mask_ack_irq,
+ .irq_unmask = icu_unmask_irq,
+};
+
+static void icu_mux_irq_demux(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_domain *domain;
+ struct icu_chip_data *data;
+ int i;
+ unsigned long mask, status, n;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 1; i < max_icu_nr; i++) {
+ if (irq == icu_data[i].cascade_irq) {
+ domain = icu_data[i].domain;
+ data = (struct icu_chip_data *)domain->host_data;
+ break;
+ }
+ }
+ if (i >= max_icu_nr) {
+ pr_err("Spurious irq %d in MMP INTC\n", irq);
+ goto out;
+ }
+
+ mask = readl_relaxed(data->reg_mask);
+ while (1) {
+ status = readl_relaxed(data->reg_status) & ~mask;
+ if (status == 0)
+ break;
+ for_each_set_bit(n, &status, BITS_PER_LONG) {
+ generic_handle_irq(icu_data[i].virq_base + n);
+ }
+ }
+
+out:
+ chained_irq_exit(chip, desc);
+}
+
+static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
+ return 0;
+}
+
+static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ *out_hwirq = intspec[0];
+ return 0;
+}
+
+static const struct irq_domain_ops mmp_irq_domain_ops = {
+ .map = mmp_irq_domain_map,
+ .xlate = mmp_irq_domain_xlate,
+};
+
+static const struct mmp_intc_conf mmp_conf = {
+ .conf_enable = 0x51,
+ .conf_disable = 0x0,
+ .conf_mask = 0x7f,
+};
+
+static const struct mmp_intc_conf mmp2_conf = {
+ .conf_enable = 0x20,
+ .conf_disable = 0x0,
+ .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
+ MMP2_ICU_INT_ROUTE_PJ4_FIQ,
+};
+
+static struct mmp_intc_conf mmp3_conf = {
+ .conf_enable = 0x20,
+ .conf_disable = 0x0,
+ .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
+ MMP2_ICU_INT_ROUTE_PJ4_FIQ,
+ .conf2_mask = 0xf0,
+};
+
+static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
+{
+ int hwirq;
+
+ hwirq = readl_relaxed(mmp_icu_base + PJ1_INT_SEL);
+ if (!(hwirq & SEL_INT_PENDING))
+ return;
+ hwirq &= SEL_INT_NUM_MASK;
+ generic_handle_domain_irq(icu_data[0].domain, hwirq);
+}
+
+static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
+{
+ int hwirq;
+
+ hwirq = readl_relaxed(mmp_icu_base + PJ4_INT_SEL);
+ if (!(hwirq & SEL_INT_PENDING))
+ return;
+ hwirq &= SEL_INT_NUM_MASK;
+ generic_handle_domain_irq(icu_data[0].domain, hwirq);
+}
+
+/* MMP (ARMv5) */
+void __init icu_init_irq(void)
+{
+ int irq;
+
+ max_icu_nr = 1;
+ mmp_icu_base = ioremap(0xd4282000, 0x1000);
+ icu_data[0].conf_enable = mmp_conf.conf_enable;
+ icu_data[0].conf_disable = mmp_conf.conf_disable;
+ icu_data[0].conf_mask = mmp_conf.conf_mask;
+ icu_data[0].nr_irqs = 64;
+ icu_data[0].virq_base = 0;
+ icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
+ &irq_domain_simple_ops,
+ &icu_data[0]);
+ for (irq = 0; irq < 64; irq++) {
+ icu_mask_irq(irq_get_irq_data(irq));
+ irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
+ }
+ irq_set_default_host(icu_data[0].domain);
+ set_handle_irq(mmp_handle_irq);
+}
+
+/* MMP2 (ARMv7) */
+void __init mmp2_init_icu(void)
+{
+ int irq, end;
+
+ max_icu_nr = 8;
+ mmp_icu_base = ioremap(0xd4282000, 0x1000);
+ icu_data[0].conf_enable = mmp2_conf.conf_enable;
+ icu_data[0].conf_disable = mmp2_conf.conf_disable;
+ icu_data[0].conf_mask = mmp2_conf.conf_mask;
+ icu_data[0].nr_irqs = 64;
+ icu_data[0].virq_base = 0;
+ icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
+ &irq_domain_simple_ops,
+ &icu_data[0]);
+ icu_data[1].reg_status = mmp_icu_base + 0x150;
+ icu_data[1].reg_mask = mmp_icu_base + 0x168;
+ icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
+ icu_data[0].nr_irqs;
+ icu_data[1].clr_mfp_hwirq = 1; /* offset to IRQ_MMP2_PMIC_BASE */
+ icu_data[1].nr_irqs = 2;
+ icu_data[1].cascade_irq = 4;
+ icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
+ icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
+ icu_data[1].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[1]);
+ icu_data[2].reg_status = mmp_icu_base + 0x154;
+ icu_data[2].reg_mask = mmp_icu_base + 0x16c;
+ icu_data[2].nr_irqs = 2;
+ icu_data[2].cascade_irq = 5;
+ icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
+ icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
+ icu_data[2].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[2]);
+ icu_data[3].reg_status = mmp_icu_base + 0x180;
+ icu_data[3].reg_mask = mmp_icu_base + 0x17c;
+ icu_data[3].nr_irqs = 3;
+ icu_data[3].cascade_irq = 9;
+ icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
+ icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
+ icu_data[3].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[3]);
+ icu_data[4].reg_status = mmp_icu_base + 0x158;
+ icu_data[4].reg_mask = mmp_icu_base + 0x170;
+ icu_data[4].nr_irqs = 5;
+ icu_data[4].cascade_irq = 17;
+ icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
+ icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
+ icu_data[4].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[4]);
+ icu_data[5].reg_status = mmp_icu_base + 0x15c;
+ icu_data[5].reg_mask = mmp_icu_base + 0x174;
+ icu_data[5].nr_irqs = 15;
+ icu_data[5].cascade_irq = 35;
+ icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
+ icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
+ icu_data[5].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[5]);
+ icu_data[6].reg_status = mmp_icu_base + 0x160;
+ icu_data[6].reg_mask = mmp_icu_base + 0x178;
+ icu_data[6].nr_irqs = 2;
+ icu_data[6].cascade_irq = 51;
+ icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
+ icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
+ icu_data[6].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[6]);
+ icu_data[7].reg_status = mmp_icu_base + 0x188;
+ icu_data[7].reg_mask = mmp_icu_base + 0x184;
+ icu_data[7].nr_irqs = 2;
+ icu_data[7].cascade_irq = 55;
+ icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
+ icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
+ icu_data[7].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[7]);
+ end = icu_data[7].virq_base + icu_data[7].nr_irqs;
+ for (irq = 0; irq < end; irq++) {
+ icu_mask_irq(irq_get_irq_data(irq));
+ if (irq == icu_data[1].cascade_irq ||
+ irq == icu_data[2].cascade_irq ||
+ irq == icu_data[3].cascade_irq ||
+ irq == icu_data[4].cascade_irq ||
+ irq == icu_data[5].cascade_irq ||
+ irq == icu_data[6].cascade_irq ||
+ irq == icu_data[7].cascade_irq) {
+ irq_set_chip(irq, &icu_irq_chip);
+ irq_set_chained_handler(irq, icu_mux_irq_demux);
+ } else {
+ irq_set_chip_and_handler(irq, &icu_irq_chip,
+ handle_level_irq);
+ }
+ }
+ irq_set_default_host(icu_data[0].domain);
+ set_handle_irq(mmp2_handle_irq);
+}
+
+#ifdef CONFIG_OF
+static int __init mmp_init_bases(struct device_node *node)
+{
+ int ret, nr_irqs, irq, i = 0;
+
+ ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
+ if (ret) {
+		pr_err("mrvl,intc-nr-irqs property not found\n");
+ return ret;
+ }
+
+ mmp_icu_base = of_iomap(node, 0);
+ if (!mmp_icu_base) {
+ pr_err("Failed to get interrupt controller register\n");
+ return -ENOMEM;
+ }
+
+ icu_data[0].virq_base = 0;
+ icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
+ &mmp_irq_domain_ops,
+ &icu_data[0]);
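+	/*
+	 * Pre-create a mapping for every hwirq; the virq returned for
+	 * hwirq 0 becomes the base of the linear virq range.
+	 */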
+ for (irq = 0; irq < nr_irqs; irq++) {
+ ret = irq_create_mapping(icu_data[0].domain, irq);
+ if (!ret) {
+			pr_err("Failed to map hwirq\n");
+ goto err;
+ }
+ if (!irq)
+ icu_data[0].virq_base = ret;
+ }
+ icu_data[0].nr_irqs = nr_irqs;
+ return 0;
+err:
+ if (icu_data[0].virq_base) {
+ for (i = 0; i < irq; i++)
+ irq_dispose_mapping(icu_data[0].virq_base + i);
+ }
+ irq_domain_remove(icu_data[0].domain);
+ iounmap(mmp_icu_base);
+ return -EINVAL;
+}
+
+static int __init mmp_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+
+ ret = mmp_init_bases(node);
+ if (ret < 0)
+ return ret;
+
+ icu_data[0].conf_enable = mmp_conf.conf_enable;
+ icu_data[0].conf_disable = mmp_conf.conf_disable;
+ icu_data[0].conf_mask = mmp_conf.conf_mask;
+ set_handle_irq(mmp_handle_irq);
+ max_icu_nr = 1;
+ return 0;
+}
+IRQCHIP_DECLARE(mmp_intc, "mrvl,mmp-intc", mmp_of_init);
+
+static int __init mmp2_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+
+ ret = mmp_init_bases(node);
+ if (ret < 0)
+ return ret;
+
+ icu_data[0].conf_enable = mmp2_conf.conf_enable;
+ icu_data[0].conf_disable = mmp2_conf.conf_disable;
+ icu_data[0].conf_mask = mmp2_conf.conf_mask;
+ set_handle_irq(mmp2_handle_irq);
+ max_icu_nr = 1;
+ return 0;
+}
+IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init);
+
+static int __init mmp3_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+
+ mmp_icu2_base = of_iomap(node, 1);
+ if (!mmp_icu2_base) {
+ pr_err("Failed to get interrupt controller register #2\n");
+ return -ENODEV;
+ }
+
+ ret = mmp_init_bases(node);
+ if (ret < 0) {
+ iounmap(mmp_icu2_base);
+ return ret;
+ }
+
+ icu_data[0].conf_enable = mmp3_conf.conf_enable;
+ icu_data[0].conf_disable = mmp3_conf.conf_disable;
+ icu_data[0].conf_mask = mmp3_conf.conf_mask;
+ icu_data[0].conf2_mask = mmp3_conf.conf2_mask;
+
+ if (!parent) {
+ /* This is the main interrupt controller. */
+ set_handle_irq(mmp2_handle_irq);
+ }
+
+ max_icu_nr = 1;
+ return 0;
+}
+IRQCHIP_DECLARE(mmp3_intc, "marvell,mmp3-intc", mmp3_of_init);
+
+static int __init mmp2_mux_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int i, ret, irq, j = 0;
+ u32 nr_irqs, mfp_irq;
+ u32 reg[4];
+
+ if (!parent)
+ return -ENODEV;
+
+ i = max_icu_nr;
+ ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
+ &nr_irqs);
+ if (ret) {
+ pr_err("Not found mrvl,intc-nr-irqs property\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * For historical reasons, the "reg" property of the
+	 * mrvl,mmp2-mux-intc is not a regular "reg" property containing
+	 * addresses on the parent bus, but offsets from the intc's base.
+	 * That is why we can't use of_address_to_resource() here.
+	 */
+ ret = of_property_read_variable_u32_array(node, "reg", reg,
+ ARRAY_SIZE(reg),
+ ARRAY_SIZE(reg));
+ if (ret < 0) {
+		pr_err("reg property not found\n");
+ return -EINVAL;
+ }
+ icu_data[i].reg_status = mmp_icu_base + reg[0];
+ icu_data[i].reg_mask = mmp_icu_base + reg[2];
+ icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
+ if (!icu_data[i].cascade_irq)
+ return -EINVAL;
+
+ icu_data[i].virq_base = 0;
+ icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
+ &mmp_irq_domain_ops,
+ &icu_data[i]);
+ for (irq = 0; irq < nr_irqs; irq++) {
+ ret = irq_create_mapping(icu_data[i].domain, irq);
+ if (!ret) {
+			pr_err("Failed to map hwirq\n");
+ goto err;
+ }
+ if (!irq)
+ icu_data[i].virq_base = ret;
+ }
+ icu_data[i].nr_irqs = nr_irqs;
+ if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
+ &mfp_irq)) {
+ icu_data[i].clr_mfp_irq_base = icu_data[i].virq_base;
+ icu_data[i].clr_mfp_hwirq = mfp_irq;
+ }
+ irq_set_chained_handler(icu_data[i].cascade_irq,
+ icu_mux_irq_demux);
+ max_icu_nr++;
+ return 0;
+err:
+ if (icu_data[i].virq_base) {
+ for (j = 0; j < irq; j++)
+ irq_dispose_mapping(icu_data[i].virq_base + j);
+ }
+ irq_domain_remove(icu_data[i].domain);
+ return -EINVAL;
+}
+IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
+#endif
diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c
new file mode 100644
index 000000000..4d0c3532d
--- /dev/null
+++ b/drivers/irqchip/irq-mscc-ocelot.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot IRQ controller driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
+
+#define ICPU_CFG_INTR_DST_INTR_IDENT(_p, x) ((_p)->reg_off_ident + 0x4 * (x))
+#define ICPU_CFG_INTR_INTR_TRIGGER(_p, x) ((_p)->reg_off_trigger + 0x4 * (x))
+
+#define FLAGS_HAS_TRIGGER BIT(0)
+#define FLAGS_NEED_INIT_ENABLE BIT(1)
+
+struct chip_props {
+ u8 flags;
+ u8 reg_off_sticky;
+ u8 reg_off_ena;
+ u8 reg_off_ena_clr;
+ u8 reg_off_ena_set;
+ u8 reg_off_ident;
+ u8 reg_off_trigger;
+ u8 reg_off_ena_irq0;
+ u8 n_irq;
+};
+
+static struct chip_props ocelot_props = {
+ .flags = FLAGS_HAS_TRIGGER,
+ .reg_off_sticky = 0x10,
+ .reg_off_ena = 0x18,
+ .reg_off_ena_clr = 0x1c,
+ .reg_off_ena_set = 0x20,
+ .reg_off_ident = 0x38,
+ .reg_off_trigger = 0x5c,
+ .n_irq = 24,
+};
+
+static struct chip_props serval_props = {
+ .flags = FLAGS_HAS_TRIGGER,
+ .reg_off_sticky = 0xc,
+ .reg_off_ena = 0x14,
+ .reg_off_ena_clr = 0x18,
+ .reg_off_ena_set = 0x1c,
+ .reg_off_ident = 0x20,
+ .reg_off_trigger = 0x4,
+ .n_irq = 24,
+};
+
+static struct chip_props luton_props = {
+ .flags = FLAGS_NEED_INIT_ENABLE,
+ .reg_off_sticky = 0,
+ .reg_off_ena = 0x4,
+ .reg_off_ena_clr = 0x8,
+ .reg_off_ena_set = 0xc,
+ .reg_off_ident = 0x18,
+ .reg_off_ena_irq0 = 0x14,
+ .n_irq = 28,
+};
+
+static struct chip_props jaguar2_props = {
+ .flags = FLAGS_HAS_TRIGGER,
+ .reg_off_sticky = 0x10,
+ .reg_off_ena = 0x18,
+ .reg_off_ena_clr = 0x1c,
+ .reg_off_ena_set = 0x20,
+ .reg_off_ident = 0x38,
+ .reg_off_trigger = 0x5c,
+ .n_irq = 29,
+};
+
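+/*
+ * When unmasking an interrupt that is not flagged in either trigger
+ * register, clear its sticky (pending) bit first so that a stale event
+ * does not fire as soon as the interrupt is enabled.
+ */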
+static void ocelot_irq_unmask(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct irq_domain *d = data->domain;
+ struct chip_props *p = d->host_data;
+ struct irq_chip_type *ct = irq_data_get_chip_type(data);
+ unsigned int mask = data->mask;
+ u32 val;
+
+ irq_gc_lock(gc);
+ val = irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(p, 0)) |
+ irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(p, 1));
+ if (!(val & mask))
+ irq_reg_writel(gc, mask, p->reg_off_sticky);
+
+ *ct->mask_cache &= ~mask;
+ irq_reg_writel(gc, mask, p->reg_off_ena_set);
+ irq_gc_unlock(gc);
+}
+
+static void ocelot_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
+ struct chip_props *p = d->host_data;
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
+ u32 reg = irq_reg_readl(gc, ICPU_CFG_INTR_DST_INTR_IDENT(p, 0));
+
+ chained_irq_enter(chip, desc);
+
+ while (reg) {
+ u32 hwirq = __fls(reg);
+
+ generic_handle_domain_irq(d, hwirq);
+ reg &= ~(BIT(hwirq));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int __init vcoreiii_irq_init(struct device_node *node,
+ struct device_node *parent,
+ struct chip_props *p)
+{
+ struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ int parent_irq, ret;
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq)
+ return -EINVAL;
+
+ domain = irq_domain_add_linear(node, p->n_irq,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("%pOFn: unable to add irq domain\n", node);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, p->n_irq, 1,
+ "icpu", handle_level_irq,
+ 0, 0, 0);
+ if (ret) {
+ pr_err("%pOFn: unable to alloc irq domain gc\n", node);
+ goto err_domain_remove;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = of_iomap(node, 0);
+ if (!gc->reg_base) {
+ pr_err("%pOFn: unable to map resource\n", node);
+ ret = -ENOMEM;
+ goto err_gc_free;
+ }
+
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[0].regs.ack = p->reg_off_sticky;
+ if (p->flags & FLAGS_HAS_TRIGGER) {
+ gc->chip_types[0].regs.mask = p->reg_off_ena_clr;
+ gc->chip_types[0].chip.irq_unmask = ocelot_irq_unmask;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ } else {
+ gc->chip_types[0].regs.enable = p->reg_off_ena_set;
+ gc->chip_types[0].regs.disable = p->reg_off_ena_clr;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ }
+
+ /* Mask and ack all interrupts */
+ irq_reg_writel(gc, 0, p->reg_off_ena);
+ irq_reg_writel(gc, 0xffffffff, p->reg_off_sticky);
+
+ /* Overall init */
+ if (p->flags & FLAGS_NEED_INIT_ENABLE)
+ irq_reg_writel(gc, BIT(0), p->reg_off_ena_irq0);
+
+ domain->host_data = p;
+ irq_set_chained_handler_and_data(parent_irq, ocelot_irq_handler,
+ domain);
+
+ return 0;
+
+err_gc_free:
+ irq_free_generic_chip(gc);
+
+err_domain_remove:
+ irq_domain_remove(domain);
+
+ return ret;
+}
+
+static int __init ocelot_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return vcoreiii_irq_init(node, parent, &ocelot_props);
+}
+
+IRQCHIP_DECLARE(ocelot_icpu, "mscc,ocelot-icpu-intr", ocelot_irq_init);
+
+static int __init serval_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return vcoreiii_irq_init(node, parent, &serval_props);
+}
+
+IRQCHIP_DECLARE(serval_icpu, "mscc,serval-icpu-intr", serval_irq_init);
+
+static int __init luton_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return vcoreiii_irq_init(node, parent, &luton_props);
+}
+
+IRQCHIP_DECLARE(luton_icpu, "mscc,luton-icpu-intr", luton_irq_init);
+
+static int __init jaguar2_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return vcoreiii_irq_init(node, parent, &jaguar2_props);
+}
+
+IRQCHIP_DECLARE(jaguar2_icpu, "mscc,jaguar2-icpu-intr", jaguar2_irq_init);
diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c
new file mode 100644
index 000000000..f6133ae28
--- /dev/null
+++ b/drivers/irqchip/irq-mst-intc.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author Mark-PK Tsai <mark-pk.tsai@mediatek.com>
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+
+#define MST_INTC_MAX_IRQS 64
+
+#define INTC_MASK 0x0
+#define INTC_REV_POLARITY 0x10
+#define INTC_EOI 0x20
+
+#ifdef CONFIG_PM_SLEEP
+static LIST_HEAD(mst_intc_list);
+#endif
+
+struct mst_intc_chip_data {
+ raw_spinlock_t lock;
+ unsigned int irq_start, nr_irqs;
+ void __iomem *base;
+ bool no_eoi;
+#ifdef CONFIG_PM_SLEEP
+ struct list_head entry;
+ u16 saved_polarity_conf[DIV_ROUND_UP(MST_INTC_MAX_IRQS, 16)];
+#endif
+};
+
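+/*
+ * Each 16-bit INTC register covers 16 interrupts and consecutive
+ * registers are spaced 4 bytes apart, hence the (hwirq / 16) * 4 offset
+ * and the 1 << (hwirq % 16) bit below.
+ */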
+static void mst_set_irq(struct irq_data *d, u32 offset)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct mst_intc_chip_data *cd = irq_data_get_irq_chip_data(d);
+ u16 val, mask;
+ unsigned long flags;
+
+ mask = 1 << (hwirq % 16);
+ offset += (hwirq / 16) * 4;
+
+ raw_spin_lock_irqsave(&cd->lock, flags);
+ val = readw_relaxed(cd->base + offset) | mask;
+ writew_relaxed(val, cd->base + offset);
+ raw_spin_unlock_irqrestore(&cd->lock, flags);
+}
+
+static void mst_clear_irq(struct irq_data *d, u32 offset)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct mst_intc_chip_data *cd = irq_data_get_irq_chip_data(d);
+ u16 val, mask;
+ unsigned long flags;
+
+ mask = 1 << (hwirq % 16);
+ offset += (hwirq / 16) * 4;
+
+ raw_spin_lock_irqsave(&cd->lock, flags);
+ val = readw_relaxed(cd->base + offset) & ~mask;
+ writew_relaxed(val, cd->base + offset);
+ raw_spin_unlock_irqrestore(&cd->lock, flags);
+}
+
+static void mst_intc_mask_irq(struct irq_data *d)
+{
+ mst_set_irq(d, INTC_MASK);
+ irq_chip_mask_parent(d);
+}
+
+static void mst_intc_unmask_irq(struct irq_data *d)
+{
+ mst_clear_irq(d, INTC_MASK);
+ irq_chip_unmask_parent(d);
+}
+
+static void mst_intc_eoi_irq(struct irq_data *d)
+{
+ struct mst_intc_chip_data *cd = irq_data_get_irq_chip_data(d);
+
+ if (!cd->no_eoi)
+ mst_set_irq(d, INTC_EOI);
+
+ irq_chip_eoi_parent(d);
+}
+
+static int mst_irq_chip_set_type(struct irq_data *data, unsigned int type)
+{
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ case IRQ_TYPE_EDGE_FALLING:
+ mst_set_irq(data, INTC_REV_POLARITY);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_EDGE_RISING:
+ mst_clear_irq(data, INTC_REV_POLARITY);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return irq_chip_set_type_parent(data, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip mst_intc_chip = {
+ .name = "mst-intc",
+ .irq_mask = mst_intc_mask_irq,
+ .irq_unmask = mst_intc_unmask_irq,
+ .irq_eoi = mst_intc_eoi_irq,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
+ .irq_set_type = mst_irq_chip_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static void mst_intc_polarity_save(struct mst_intc_chip_data *cd)
+{
+ int i;
+ void __iomem *addr = cd->base + INTC_REV_POLARITY;
+
+ for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++)
+ cd->saved_polarity_conf[i] = readw_relaxed(addr + i * 4);
+}
+
+static void mst_intc_polarity_restore(struct mst_intc_chip_data *cd)
+{
+ int i;
+ void __iomem *addr = cd->base + INTC_REV_POLARITY;
+
+ for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++)
+ writew_relaxed(cd->saved_polarity_conf[i], addr + i * 4);
+}
+
+static void mst_irq_resume(void)
+{
+ struct mst_intc_chip_data *cd;
+
+ list_for_each_entry(cd, &mst_intc_list, entry)
+ mst_intc_polarity_restore(cd);
+}
+
+static int mst_irq_suspend(void)
+{
+ struct mst_intc_chip_data *cd;
+
+ list_for_each_entry(cd, &mst_intc_list, entry)
+ mst_intc_polarity_save(cd);
+ return 0;
+}
+
+static struct syscore_ops mst_irq_syscore_ops = {
+ .suspend = mst_irq_suspend,
+ .resume = mst_irq_resume,
+};
+
+static int __init mst_irq_pm_init(void)
+{
+ register_syscore_ops(&mst_irq_syscore_ops);
+ return 0;
+}
+late_initcall(mst_irq_pm_init);
+#endif
+
+static int mst_intc_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct mst_intc_chip_data *cd = d->host_data;
+
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (fwspec->param[0] != 0)
+ return -EINVAL;
+
+ if (fwspec->param[1] >= cd->nr_irqs)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int mst_intc_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ int i;
+ irq_hw_number_t hwirq;
+ struct irq_fwspec parent_fwspec, *fwspec = data;
+ struct mst_intc_chip_data *cd = domain->host_data;
+
+ /* Not GIC compliant */
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (fwspec->param[0])
+ return -EINVAL;
+
+ hwirq = fwspec->param[1];
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &mst_intc_chip,
+ domain->host_data);
+
+ parent_fwspec = *fwspec;
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param[1] = cd->irq_start + hwirq;
+
+	/*
+	 * mst-intc latches the interrupt request if it is edge-triggered,
+	 * so the output signal to the parent GIC is always level sensitive.
+	 * If the irq signal is active low, mst_irq_chip_set_type() sets the
+	 * REV_POLARITY bit to make it active high, as required by the GIC
+	 * SPI spec.
+	 */
+ parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec);
+}
+
+static const struct irq_domain_ops mst_intc_domain_ops = {
+ .translate = mst_intc_domain_translate,
+ .alloc = mst_intc_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init mst_intc_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *domain_parent;
+ struct mst_intc_chip_data *cd;
+ u32 irq_start, irq_end;
+
+ domain_parent = irq_find_host(parent);
+ if (!domain_parent) {
+ pr_err("mst-intc: interrupt-parent not found\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32_index(dn, "mstar,irqs-map-range", 0, &irq_start) ||
+ of_property_read_u32_index(dn, "mstar,irqs-map-range", 1, &irq_end))
+ return -EINVAL;
+
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ cd->base = of_iomap(dn, 0);
+ if (!cd->base) {
+ kfree(cd);
+ return -ENOMEM;
+ }
+
+ cd->no_eoi = of_property_read_bool(dn, "mstar,intc-no-eoi");
+ raw_spin_lock_init(&cd->lock);
+ cd->irq_start = irq_start;
+ cd->nr_irqs = irq_end - irq_start + 1;
+ domain = irq_domain_add_hierarchy(domain_parent, 0, cd->nr_irqs, dn,
+ &mst_intc_domain_ops, cd);
+ if (!domain) {
+ iounmap(cd->base);
+ kfree(cd);
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_PM_SLEEP
+ INIT_LIST_HEAD(&cd->entry);
+ list_add_tail(&cd->entry, &mst_intc_list);
+#endif
+ return 0;
+}
+
+IRQCHIP_DECLARE(mst_intc, "mstar,mst-intc", mst_intc_of_init);
diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c
new file mode 100644
index 000000000..9bca09180
--- /dev/null
+++ b/drivers/irqchip/irq-mtk-cirq.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Youlin.Pei <youlin.pei@mediatek.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#define CIRQ_ACK 0x40
+#define CIRQ_MASK_SET 0xc0
+#define CIRQ_MASK_CLR 0x100
+#define CIRQ_SENS_SET 0x180
+#define CIRQ_SENS_CLR 0x1c0
+#define CIRQ_POL_SET 0x240
+#define CIRQ_POL_CLR 0x280
+#define CIRQ_CONTROL 0x300
+
+#define CIRQ_EN 0x1
+#define CIRQ_EDGE 0x2
+#define CIRQ_FLUSH 0x4
+
+struct mtk_cirq_chip_data {
+ void __iomem *base;
+ unsigned int ext_irq_start;
+ unsigned int ext_irq_end;
+ struct irq_domain *domain;
+};
+
+static struct mtk_cirq_chip_data *cirq_data;
+
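+/* Each CIRQ register bank covers 32 interrupts, one bit per interrupt. */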
+static void mtk_cirq_write_mask(struct irq_data *data, unsigned int offset)
+{
+ struct mtk_cirq_chip_data *chip_data = data->chip_data;
+ unsigned int cirq_num = data->hwirq;
+ u32 mask = 1 << (cirq_num % 32);
+
+ writel_relaxed(mask, chip_data->base + offset + (cirq_num / 32) * 4);
+}
+
+static void mtk_cirq_mask(struct irq_data *data)
+{
+ mtk_cirq_write_mask(data, CIRQ_MASK_SET);
+ irq_chip_mask_parent(data);
+}
+
+static void mtk_cirq_unmask(struct irq_data *data)
+{
+ mtk_cirq_write_mask(data, CIRQ_MASK_CLR);
+ irq_chip_unmask_parent(data);
+}
+
+static int mtk_cirq_set_type(struct irq_data *data, unsigned int type)
+{
+ int ret;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ mtk_cirq_write_mask(data, CIRQ_POL_CLR);
+ mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ mtk_cirq_write_mask(data, CIRQ_POL_SET);
+ mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ mtk_cirq_write_mask(data, CIRQ_POL_CLR);
+ mtk_cirq_write_mask(data, CIRQ_SENS_SET);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ mtk_cirq_write_mask(data, CIRQ_POL_SET);
+ mtk_cirq_write_mask(data, CIRQ_SENS_SET);
+ break;
+ default:
+ break;
+ }
+
+ data = data->parent_data;
+ ret = data->chip->irq_set_type(data, type);
+ return ret;
+}
+
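+	/* Drain the status register until no unmasked interrupt remains */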
+static struct irq_chip mtk_cirq_chip = {
+ .name = "MT_CIRQ",
+ .irq_mask = mtk_cirq_mask,
+ .irq_unmask = mtk_cirq_unmask,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = mtk_cirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int mtk_cirq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (fwspec->param[0] != 0)
+ return -EINVAL;
+
+		/* check that the irq number is within the range supported by cirq */
+ if (fwspec->param[1] < cirq_data->ext_irq_start ||
+ fwspec->param[1] > cirq_data->ext_irq_end)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1] - cirq_data->ext_irq_start;
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int mtk_cirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int ret;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ struct irq_fwspec *fwspec = arg;
+ struct irq_fwspec parent_fwspec = *fwspec;
+
+ ret = mtk_cirq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &mtk_cirq_chip,
+ domain->host_data);
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static const struct irq_domain_ops cirq_domain_ops = {
+ .translate = mtk_cirq_domain_translate,
+ .alloc = mtk_cirq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_cirq_suspend(void)
+{
+ u32 value, mask;
+ unsigned int irq, hwirq_num;
+ bool pending, masked;
+ int i, pendret, maskret;
+
+ /*
+	 * When external interrupts occur, CIRQ records their status even
+	 * when CIRQ is not enabled. When the flush command is executed,
+	 * CIRQ resends the signals according to that status, so if the
+	 * status is not cleared, CIRQ will resend stale signals.
+	 *
+	 * arch_suspend_disable_irqs() is called before the CIRQ suspend
+	 * callback. If we simply cleared all the status bits, external
+	 * interrupts that arrived between arch_suspend_disable_irqs() and
+	 * the CIRQ suspend callback would be lost. Use the following steps
+	 * to avoid this:
+	 *
+	 * - Iterate over all the interrupts supported by CIRQ;
+	 * - For each interrupt, inspect its pending and masked status at
+	 *   GIC level;
+	 * - If it is pending and unmasked, it arrived between
+	 *   arch_suspend_disable_irqs() and the CIRQ suspend callback,
+	 *   so don't ACK it. Otherwise, ACK it.
+ */
+ hwirq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
+ for (i = 0; i < hwirq_num; i++) {
+ irq = irq_find_mapping(cirq_data->domain, i);
+ if (irq) {
+ pendret = irq_get_irqchip_state(irq,
+ IRQCHIP_STATE_PENDING,
+ &pending);
+
+ maskret = irq_get_irqchip_state(irq,
+ IRQCHIP_STATE_MASKED,
+ &masked);
+
+ if (pendret == 0 && maskret == 0 &&
+ (pending && !masked))
+ continue;
+ }
+
+ mask = 1 << (i % 32);
+ writel_relaxed(mask, cirq_data->base + CIRQ_ACK + (i / 32) * 4);
+ }
+
+	/* set edge_only mode, record edge-triggered interrupts */
+ /* enable cirq */
+ value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
+ value |= (CIRQ_EDGE | CIRQ_EN);
+ writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);
+
+ return 0;
+}
+
+static void mtk_cirq_resume(void)
+{
+ u32 value;
+
+ /* flush recorded interrupts, will send signals to parent controller */
+ value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
+ writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);
+
+ /* disable cirq */
+ value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
+ value &= ~(CIRQ_EDGE | CIRQ_EN);
+ writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);
+}
+
+static struct syscore_ops mtk_cirq_syscore_ops = {
+ .suspend = mtk_cirq_suspend,
+ .resume = mtk_cirq_resume,
+};
+
+static void mtk_cirq_syscore_init(void)
+{
+ register_syscore_ops(&mtk_cirq_syscore_ops);
+}
+#else
+static inline void mtk_cirq_syscore_init(void) {}
+#endif
+
+static int __init mtk_cirq_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *domain_parent;
+ unsigned int irq_num;
+ int ret;
+
+ domain_parent = irq_find_host(parent);
+ if (!domain_parent) {
+ pr_err("mtk_cirq: interrupt-parent not found\n");
+ return -EINVAL;
+ }
+
+ cirq_data = kzalloc(sizeof(*cirq_data), GFP_KERNEL);
+ if (!cirq_data)
+ return -ENOMEM;
+
+ cirq_data->base = of_iomap(node, 0);
+ if (!cirq_data->base) {
+ pr_err("mtk_cirq: unable to map cirq register\n");
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
+ &cirq_data->ext_irq_start);
+ if (ret)
+ goto out_unmap;
+
+ ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
+ &cirq_data->ext_irq_end);
+ if (ret)
+ goto out_unmap;
+
+ irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
+ domain = irq_domain_add_hierarchy(domain_parent, 0,
+ irq_num, node,
+ &cirq_domain_ops, cirq_data);
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+ cirq_data->domain = domain;
+
+ mtk_cirq_syscore_init();
+
+ return 0;
+
+out_unmap:
+ iounmap(cirq_data->base);
+out_free:
+ kfree(cirq_data);
+ return ret;
+}
+
+IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
new file mode 100644
index 000000000..586e52d54
--- /dev/null
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Joe.C <yingjoe.chen@mediatek.com>
+ */
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct mtk_sysirq_chip_data {
+ raw_spinlock_t lock;
+ u32 nr_intpol_bases;
+ void __iomem **intpol_bases;
+ u32 *intpol_words;
+ u8 *intpol_idx;
+ u16 *which_word;
+};
+
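+/*
+ * The sysirq block can invert an interrupt's polarity: active-low and
+ * falling-edge interrupts get their INTPOL bit set and are presented to
+ * the parent GIC as active-high or rising-edge instead.
+ */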
+static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
+{
+ irq_hw_number_t hwirq = data->hwirq;
+ struct mtk_sysirq_chip_data *chip_data = data->chip_data;
+ u8 intpol_idx = chip_data->intpol_idx[hwirq];
+ void __iomem *base;
+ u32 offset, reg_index, value;
+ unsigned long flags;
+ int ret;
+
+ base = chip_data->intpol_bases[intpol_idx];
+ reg_index = chip_data->which_word[hwirq];
+ offset = hwirq & 0x1f;
+
+ raw_spin_lock_irqsave(&chip_data->lock, flags);
+ value = readl_relaxed(base + reg_index * 4);
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
+ if (type == IRQ_TYPE_LEVEL_LOW)
+ type = IRQ_TYPE_LEVEL_HIGH;
+ else
+ type = IRQ_TYPE_EDGE_RISING;
+ value |= (1 << offset);
+ } else {
+ value &= ~(1 << offset);
+ }
+
+ writel_relaxed(value, base + reg_index * 4);
+
+ data = data->parent_data;
+ ret = data->chip->irq_set_type(data, type);
+ raw_spin_unlock_irqrestore(&chip_data->lock, flags);
+ return ret;
+}
+
+static struct irq_chip mtk_sysirq_chip = {
+ .name = "MT_SYSIRQ",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = mtk_sysirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int mtk_sysirq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (fwspec->param[0] != 0)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i;
+ irq_hw_number_t hwirq;
+ struct irq_fwspec *fwspec = arg;
+ struct irq_fwspec gic_fwspec = *fwspec;
+
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* sysirq doesn't support PPI */
+ if (fwspec->param[0])
+ return -EINVAL;
+
+ hwirq = fwspec->param[1];
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &mtk_sysirq_chip,
+ domain->host_data);
+
+ gic_fwspec.fwnode = domain->parent->fwnode;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec);
+}
+
+static const struct irq_domain_ops sysirq_domain_ops = {
+ .translate = mtk_sysirq_domain_translate,
+ .alloc = mtk_sysirq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init mtk_sysirq_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *domain_parent;
+ struct mtk_sysirq_chip_data *chip_data;
+ int ret, size, intpol_num = 0, nr_intpol_bases = 0, i = 0;
+
+ domain_parent = irq_find_host(parent);
+ if (!domain_parent) {
+ pr_err("mtk_sysirq: interrupt-parent not found\n");
+ return -EINVAL;
+ }
+
+ chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+ if (!chip_data)
+ return -ENOMEM;
+
+ while (of_get_address(node, i++, NULL, NULL))
+ nr_intpol_bases++;
+
+ if (nr_intpol_bases == 0) {
+ pr_err("mtk_sysirq: base address not specified\n");
+ ret = -EINVAL;
+ goto out_free_chip;
+ }
+
+ chip_data->intpol_words = kcalloc(nr_intpol_bases,
+ sizeof(*chip_data->intpol_words),
+ GFP_KERNEL);
+ if (!chip_data->intpol_words) {
+ ret = -ENOMEM;
+ goto out_free_chip;
+ }
+
+ chip_data->intpol_bases = kcalloc(nr_intpol_bases,
+ sizeof(*chip_data->intpol_bases),
+ GFP_KERNEL);
+ if (!chip_data->intpol_bases) {
+ ret = -ENOMEM;
+ goto out_free_intpol_words;
+ }
+
+ for (i = 0; i < nr_intpol_bases; i++) {
+ struct resource res;
+
+ ret = of_address_to_resource(node, i, &res);
+ size = resource_size(&res);
+ intpol_num += size * 8;
+ chip_data->intpol_words[i] = size / 4;
+ chip_data->intpol_bases[i] = of_iomap(node, i);
+ if (ret || !chip_data->intpol_bases[i]) {
+ pr_err("%pOF: couldn't map region %d\n", node, i);
+ ret = -ENODEV;
+ goto out_free_intpol;
+ }
+ }
+
+ chip_data->intpol_idx = kcalloc(intpol_num,
+ sizeof(*chip_data->intpol_idx),
+ GFP_KERNEL);
+ if (!chip_data->intpol_idx) {
+ ret = -ENOMEM;
+ goto out_free_intpol;
+ }
+
+ chip_data->which_word = kcalloc(intpol_num,
+ sizeof(*chip_data->which_word),
+ GFP_KERNEL);
+ if (!chip_data->which_word) {
+ ret = -ENOMEM;
+ goto out_free_intpol_idx;
+ }
+
+	/*
+	 * For each irq, record which intpol base and which word within it
+	 * the irq belongs to, so the polarity can be set quickly later.
+	 */
+ for (i = 0; i < intpol_num ; i++) {
+ u32 word = i / 32, j;
+
+ for (j = 0; word >= chip_data->intpol_words[j] ; j++)
+ word -= chip_data->intpol_words[j];
+
+ chip_data->intpol_idx[i] = j;
+ chip_data->which_word[i] = word;
+ }
+
+ domain = irq_domain_add_hierarchy(domain_parent, 0, intpol_num, node,
+ &sysirq_domain_ops, chip_data);
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out_free_which_word;
+ }
+ raw_spin_lock_init(&chip_data->lock);
+
+ return 0;
+
+out_free_which_word:
+ kfree(chip_data->which_word);
+out_free_intpol_idx:
+ kfree(chip_data->intpol_idx);
+out_free_intpol:
+ for (i = 0; i < nr_intpol_bases; i++)
+ if (chip_data->intpol_bases[i])
+ iounmap(chip_data->intpol_bases[i]);
+ kfree(chip_data->intpol_bases);
+out_free_intpol_words:
+ kfree(chip_data->intpol_words);
+out_free_chip:
+ kfree(chip_data);
+ return ret;
+}
+IRQCHIP_DECLARE(mtk_sysirq, "mediatek,mt6577-sysirq", mtk_sysirq_of_init);
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
new file mode 100644
index 000000000..c43a34506
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define GICP_SETSPI_NSR_OFFSET 0x0
+#define GICP_CLRSPI_NSR_OFFSET 0x8
+
+struct mvebu_gicp_spi_range {
+ unsigned int start;
+ unsigned int count;
+};
+
+struct mvebu_gicp {
+ struct mvebu_gicp_spi_range *spi_ranges;
+ unsigned int spi_ranges_cnt;
+ unsigned int spi_cnt;
+ unsigned long *spi_bitmap;
+ spinlock_t spi_lock;
+ struct resource *res;
+ struct device *dev;
+};
+
+static int gicp_idx_to_spi(struct mvebu_gicp *gicp, int idx)
+{
+ int i;
+
+ for (i = 0; i < gicp->spi_ranges_cnt; i++) {
+ struct mvebu_gicp_spi_range *r = &gicp->spi_ranges[i];
+
+ if (idx < r->count)
+ return r->start + idx;
+
+ idx -= r->count;
+ }
+
+ return -EINVAL;
+}
+
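+/*
+ * Two MSI messages are composed per interrupt: one targeting the SETSPI
+ * register to raise the SPI and one targeting CLRSPI to lower it, which
+ * is what lets these MSIs behave as level interrupts.
+ */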
+static void gicp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mvebu_gicp *gicp = data->chip_data;
+ phys_addr_t setspi = gicp->res->start + GICP_SETSPI_NSR_OFFSET;
+ phys_addr_t clrspi = gicp->res->start + GICP_CLRSPI_NSR_OFFSET;
+
+ msg[0].data = data->hwirq;
+ msg[0].address_lo = lower_32_bits(setspi);
+ msg[0].address_hi = upper_32_bits(setspi);
+ msg[1].data = data->hwirq;
+ msg[1].address_lo = lower_32_bits(clrspi);
+ msg[1].address_hi = upper_32_bits(clrspi);
+}
+
+static struct irq_chip gicp_irq_chip = {
+ .name = "GICP",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_compose_msi_msg = gicp_compose_msi_msg,
+};
+
+static int gicp_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct mvebu_gicp *gicp = domain->host_data;
+ struct irq_fwspec fwspec;
+ unsigned int hwirq;
+ int ret;
+
+ spin_lock(&gicp->spi_lock);
+ hwirq = find_first_zero_bit(gicp->spi_bitmap, gicp->spi_cnt);
+ if (hwirq == gicp->spi_cnt) {
+ spin_unlock(&gicp->spi_lock);
+ return -ENOSPC;
+ }
+ __set_bit(hwirq, gicp->spi_bitmap);
+ spin_unlock(&gicp->spi_lock);
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = GIC_SPI;
+ fwspec.param[1] = gicp_idx_to_spi(gicp, hwirq) - 32;
+ /*
+ * Assume edge rising for now, it will be properly set when
+ * ->set_type() is called
+ */
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret) {
+ dev_err(gicp->dev, "Cannot allocate parent IRQ\n");
+ goto free_hwirq;
+ }
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &gicp_irq_chip, gicp);
+ if (ret)
+ goto free_irqs_parent;
+
+ return 0;
+
+free_irqs_parent:
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+free_hwirq:
+ spin_lock(&gicp->spi_lock);
+ __clear_bit(hwirq, gicp->spi_bitmap);
+ spin_unlock(&gicp->spi_lock);
+ return ret;
+}
+
+static void gicp_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct mvebu_gicp *gicp = domain->host_data;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ if (d->hwirq >= gicp->spi_cnt) {
+ dev_err(gicp->dev, "Invalid hwirq %lu\n", d->hwirq);
+ return;
+ }
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+ spin_lock(&gicp->spi_lock);
+ __clear_bit(d->hwirq, gicp->spi_bitmap);
+ spin_unlock(&gicp->spi_lock);
+}
+
+static const struct irq_domain_ops gicp_domain_ops = {
+ .alloc = gicp_irq_domain_alloc,
+ .free = gicp_irq_domain_free,
+};
+
+static struct irq_chip gicp_msi_irq_chip = {
+ .name = "GICP",
+ .irq_set_type = irq_chip_set_type_parent,
+ .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
+};
+
+static struct msi_domain_ops gicp_msi_ops = {
+};
+
+static struct msi_domain_info gicp_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_LEVEL_CAPABLE),
+ .ops = &gicp_msi_ops,
+ .chip = &gicp_msi_irq_chip,
+};
+
+static int mvebu_gicp_probe(struct platform_device *pdev)
+{
+ struct mvebu_gicp *gicp;
+ struct irq_domain *inner_domain, *plat_domain, *parent_domain;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *irq_parent_dn;
+ int ret, i;
+
+ gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL);
+ if (!gicp)
+ return -ENOMEM;
+
+ gicp->dev = &pdev->dev;
+ spin_lock_init(&gicp->spi_lock);
+
+ gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!gicp->res)
+ return -ENODEV;
+
+ ret = of_property_count_u32_elems(node, "marvell,spi-ranges");
+ if (ret < 0)
+ return ret;
+
+ gicp->spi_ranges_cnt = ret / 2;
+
+ gicp->spi_ranges =
+ devm_kcalloc(&pdev->dev,
+ gicp->spi_ranges_cnt,
+ sizeof(struct mvebu_gicp_spi_range),
+ GFP_KERNEL);
+ if (!gicp->spi_ranges)
+ return -ENOMEM;
+
+ for (i = 0; i < gicp->spi_ranges_cnt; i++) {
+ of_property_read_u32_index(node, "marvell,spi-ranges",
+ i * 2,
+ &gicp->spi_ranges[i].start);
+
+ of_property_read_u32_index(node, "marvell,spi-ranges",
+ i * 2 + 1,
+ &gicp->spi_ranges[i].count);
+
+ gicp->spi_cnt += gicp->spi_ranges[i].count;
+ }
+
+ gicp->spi_bitmap = devm_bitmap_zalloc(&pdev->dev, gicp->spi_cnt, GFP_KERNEL);
+ if (!gicp->spi_bitmap)
+ return -ENOMEM;
+
+ irq_parent_dn = of_irq_find_parent(node);
+ if (!irq_parent_dn) {
+ dev_err(&pdev->dev, "failed to find parent IRQ node\n");
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(irq_parent_dn);
+ of_node_put(irq_parent_dn);
+ if (!parent_domain) {
+ dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
+ return -ENODEV;
+ }
+
+ inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
+ gicp->spi_cnt,
+ of_node_to_fwnode(node),
+ &gicp_domain_ops, gicp);
+ if (!inner_domain)
+ return -ENOMEM;
+
+
+ plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+ &gicp_msi_domain_info,
+ inner_domain);
+ if (!plat_domain) {
+ irq_domain_remove(inner_domain);
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, gicp);
+
+ return 0;
+}
+
+static const struct of_device_id mvebu_gicp_of_match[] = {
+ { .compatible = "marvell,ap806-gicp", },
+ {},
+};
+
+static struct platform_driver mvebu_gicp_driver = {
+ .probe = mvebu_gicp_probe,
+ .driver = {
+ .name = "mvebu-gicp",
+ .of_match_table = mvebu_gicp_of_match,
+ },
+};
+builtin_platform_driver(mvebu_gicp_driver);
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
new file mode 100644
index 000000000..497da3447
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Hanna Hawa <hannah@marvell.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/interrupt-controller/mvebu-icu.h>
+
+/* ICU registers */
+#define ICU_SETSPI_NSR_AL 0x10
+#define ICU_SETSPI_NSR_AH 0x14
+#define ICU_CLRSPI_NSR_AL 0x18
+#define ICU_CLRSPI_NSR_AH 0x1c
+#define ICU_SET_SEI_AL 0x50
+#define ICU_SET_SEI_AH 0x54
+#define ICU_CLR_SEI_AL 0x58
+#define ICU_CLR_SEI_AH 0x5C
+#define ICU_INT_CFG(x) (0x100 + 4 * (x))
+#define ICU_INT_ENABLE BIT(24)
+#define ICU_IS_EDGE BIT(28)
+#define ICU_GROUP_SHIFT 29
+
+/* ICU definitions */
+#define ICU_MAX_IRQS 207
+#define ICU_SATA0_ICU_ID 109
+#define ICU_SATA1_ICU_ID 107
+
+struct mvebu_icu_subset_data {
+ unsigned int icu_group;
+ unsigned int offset_set_ah;
+ unsigned int offset_set_al;
+ unsigned int offset_clr_ah;
+ unsigned int offset_clr_al;
+};
+
+struct mvebu_icu {
+ void __iomem *base;
+ struct device *dev;
+};
+
+struct mvebu_icu_msi_data {
+ struct mvebu_icu *icu;
+ atomic_t initialized;
+ const struct mvebu_icu_subset_data *subset_data;
+};
+
+struct mvebu_icu_irq_data {
+ struct mvebu_icu *icu;
+ unsigned int icu_group;
+ unsigned int type;
+};
+
+static DEFINE_STATIC_KEY_FALSE(legacy_bindings);
+
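+/*
+ * One-off per-domain setup: latch the doorbell addresses from the MSI
+ * messages into the ICU's SET (and, for the NSR group, CLEAR) address
+ * registers.
+ */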
+static void mvebu_icu_init(struct mvebu_icu *icu,
+ struct mvebu_icu_msi_data *msi_data,
+ struct msi_msg *msg)
+{
+ const struct mvebu_icu_subset_data *subset = msi_data->subset_data;
+
+ if (atomic_cmpxchg(&msi_data->initialized, false, true))
+ return;
+
+ /* Set 'SET' ICU SPI message address in AP */
+ writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
+ writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);
+
+ if (subset->icu_group != ICU_GRP_NSR)
+ return;
+
+ /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
+ writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
+ writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
+}
+
+static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct irq_data *d = irq_get_irq_data(desc->irq);
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d->domain);
+ struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
+ struct mvebu_icu *icu = icu_irqd->icu;
+ unsigned int icu_int;
+
+ if (msg->address_lo || msg->address_hi) {
+ /* One off initialization per domain */
+ mvebu_icu_init(icu, msi_data, msg);
+ /* Configure the ICU with irq number & type */
+ icu_int = msg->data | ICU_INT_ENABLE;
+ if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
+ icu_int |= ICU_IS_EDGE;
+ icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT;
+ } else {
+ /* De-configure the ICU */
+ icu_int = 0;
+ }
+
+ writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));
+
+ /*
+	 * The SATA unit has 2 ports, and a dedicated ICU entry per
+	 * port. The ahci sata driver supports only one interrupt per
+	 * SATA unit. To solve this conflict, we configure the 2 SATA
+	 * wired interrupts in the south bridge onto 1 GIC interrupt in
+	 * the north bridge. Even if only a single port is enabled, both
+	 * interrupts are configured whenever the sata node is enabled,
+	 * regardless of which port is actually in use.
+ */
+ if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
+ writel_relaxed(icu_int,
+ icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
+ writel_relaxed(icu_int,
+ icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
+ }
+}
+
+static struct irq_chip mvebu_icu_nsr_chip = {
+ .name = "ICU-NSR",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static struct irq_chip mvebu_icu_sei_chip = {
+ .name = "ICU-SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int
+mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d);
+ struct mvebu_icu *icu = platform_msi_get_host_data(d);
+ unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
+
+ /* Check the count of the parameters in dt */
+ if (WARN_ON(fwspec->param_count != param_count)) {
+ dev_err(icu->dev, "wrong ICU parameter count %d\n",
+ fwspec->param_count);
+ return -EINVAL;
+ }
+
+ if (static_branch_unlikely(&legacy_bindings)) {
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ if (fwspec->param[0] != ICU_GRP_NSR) {
+ dev_err(icu->dev, "wrong ICU group type %x\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+ } else {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ /*
+		 * The ICU receives level interrupts. While NSR interrupts are
+		 * also level interrupts, SEI interrupts are edge interrupts.
+		 * Force the type to edge in that case. Note that this makes
+		 * the interrupt handling unreliable.
+ */
+ if (msi_data->subset_data->icu_group == ICU_GRP_SEI)
+ *type = IRQ_TYPE_EDGE_RISING;
+ }
+
+ if (*hwirq >= ICU_MAX_IRQS) {
+ dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ int err;
+ unsigned long hwirq;
+ struct irq_fwspec *fwspec = args;
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain);
+ struct mvebu_icu *icu = msi_data->icu;
+ struct mvebu_icu_irq_data *icu_irqd;
+ struct irq_chip *chip = &mvebu_icu_nsr_chip;
+
+ icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
+ if (!icu_irqd)
+ return -ENOMEM;
+
+ err = mvebu_icu_irq_domain_translate(domain, fwspec, &hwirq,
+ &icu_irqd->type);
+ if (err) {
+ dev_err(icu->dev, "failed to translate ICU parameters\n");
+ goto free_irqd;
+ }
+
+ if (static_branch_unlikely(&legacy_bindings))
+ icu_irqd->icu_group = fwspec->param[0];
+ else
+ icu_irqd->icu_group = msi_data->subset_data->icu_group;
+ icu_irqd->icu = icu;
+
+ err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
+ if (err) {
+ dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
+ goto free_irqd;
+ }
+
+ /* Make sure there is no interrupt left pending by the firmware */
+ err = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
+ if (err)
+ goto free_msi;
+
+ if (icu_irqd->icu_group == ICU_GRP_SEI)
+ chip = &mvebu_icu_sei_chip;
+
+ err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ chip, icu_irqd);
+ if (err) {
+ dev_err(icu->dev, "failed to set the data to IRQ domain\n");
+ goto free_msi;
+ }
+
+ return 0;
+
+free_msi:
+ platform_msi_device_domain_free(domain, virq, nr_irqs);
+free_irqd:
+ kfree(icu_irqd);
+ return err;
+}
+
+static void
+mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_get_irq_data(virq);
+ struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
+
+ kfree(icu_irqd);
+
+ platform_msi_device_domain_free(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops mvebu_icu_domain_ops = {
+ .translate = mvebu_icu_irq_domain_translate,
+ .alloc = mvebu_icu_irq_domain_alloc,
+ .free = mvebu_icu_irq_domain_free,
+};
+
+static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
+ .icu_group = ICU_GRP_NSR,
+ .offset_set_ah = ICU_SETSPI_NSR_AH,
+ .offset_set_al = ICU_SETSPI_NSR_AL,
+ .offset_clr_ah = ICU_CLRSPI_NSR_AH,
+ .offset_clr_al = ICU_CLRSPI_NSR_AL,
+};
+
+static const struct mvebu_icu_subset_data mvebu_icu_sei_subset_data = {
+ .icu_group = ICU_GRP_SEI,
+ .offset_set_ah = ICU_SET_SEI_AH,
+ .offset_set_al = ICU_SET_SEI_AL,
+};
+
+static const struct of_device_id mvebu_icu_subset_of_match[] = {
+ {
+ .compatible = "marvell,cp110-icu-nsr",
+ .data = &mvebu_icu_nsr_subset_data,
+ },
+ {
+ .compatible = "marvell,cp110-icu-sei",
+ .data = &mvebu_icu_sei_subset_data,
+ },
+ {},
+};
+
+static int mvebu_icu_subset_probe(struct platform_device *pdev)
+{
+ struct mvebu_icu_msi_data *msi_data;
+ struct device_node *msi_parent_dn;
+ struct device *dev = &pdev->dev;
+ struct irq_domain *irq_domain;
+
+ msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
+ if (static_branch_unlikely(&legacy_bindings)) {
+ msi_data->icu = dev_get_drvdata(dev);
+ msi_data->subset_data = &mvebu_icu_nsr_subset_data;
+ } else {
+ msi_data->icu = dev_get_drvdata(dev->parent);
+ msi_data->subset_data = of_device_get_match_data(dev);
+ }
+
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_PLATFORM_MSI);
+ if (!dev->msi.domain)
+ return -EPROBE_DEFER;
+
+ msi_parent_dn = irq_domain_get_of_node(dev->msi.domain);
+ if (!msi_parent_dn)
+ return -ENODEV;
+
+ irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
+ mvebu_icu_write_msg,
+ &mvebu_icu_domain_ops,
+ msi_data);
+ if (!irq_domain) {
+ dev_err(dev, "Failed to create ICU MSI domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct platform_driver mvebu_icu_subset_driver = {
+ .probe = mvebu_icu_subset_probe,
+ .driver = {
+ .name = "mvebu-icu-subset",
+ .of_match_table = mvebu_icu_subset_of_match,
+ },
+};
+builtin_platform_driver(mvebu_icu_subset_driver);
+
+static int mvebu_icu_probe(struct platform_device *pdev)
+{
+ struct mvebu_icu *icu;
+ int i;
+
+ icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu),
+ GFP_KERNEL);
+ if (!icu)
+ return -ENOMEM;
+
+ icu->dev = &pdev->dev;
+
+ icu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(icu->base))
+ return PTR_ERR(icu->base);
+
+ /*
+	 * Legacy bindings: the ICU is a single node with one MSI parent, so
+	 * manually force the probe of the NSR interrupt side.
+	 * New bindings: the ICU node has children, one per interrupt
+	 * controller, each with its own MSI parent, so populate the child
+	 * platform devices.
+	 * All ICU instances should use the same bindings.
+ */
+ if (!of_get_child_count(pdev->dev.of_node))
+ static_branch_enable(&legacy_bindings);
+
+ /*
+ * Clean all ICU interrupts of type NSR and SEI, required to
+ * avoid unpredictable SPI assignments done by firmware.
+ */
+ for (i = 0 ; i < ICU_MAX_IRQS ; i++) {
+ u32 icu_int, icu_grp;
+
+ icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
+ icu_grp = icu_int >> ICU_GROUP_SHIFT;
+
+ if (icu_grp == ICU_GRP_NSR ||
+ (icu_grp == ICU_GRP_SEI &&
+ !static_branch_unlikely(&legacy_bindings)))
+ writel_relaxed(0x0, icu->base + ICU_INT_CFG(i));
+ }
+
+ platform_set_drvdata(pdev, icu);
+
+ if (static_branch_unlikely(&legacy_bindings))
+ return mvebu_icu_subset_probe(pdev);
+ else
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id mvebu_icu_of_match[] = {
+ { .compatible = "marvell,cp110-icu", },
+ {},
+};
+
+static struct platform_driver mvebu_icu_driver = {
+ .probe = mvebu_icu_probe,
+ .driver = {
+ .name = "mvebu-icu",
+ .of_match_table = mvebu_icu_of_match,
+ },
+};
+builtin_platform_driver(mvebu_icu_driver);
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
new file mode 100644
index 000000000..dc4145abd
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2016 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "GIC-ODMI: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define GICP_ODMIN_SET 0x40
+#define GICP_ODMI_INT_NUM_SHIFT 12
+#define GICP_ODMIN_GM_EP_R0 0x110
+#define GICP_ODMIN_GM_EP_R1 0x114
+#define GICP_ODMIN_GM_EA_R0 0x108
+#define GICP_ODMIN_GM_EA_R1 0x118
+
+/*
+ * We don't support the group events, so we simply have 8 interrupts
+ * per frame.
+ */
+#define NODMIS_SHIFT 3
+#define NODMIS_PER_FRAME (1 << NODMIS_SHIFT)
+#define NODMIS_MASK (NODMIS_PER_FRAME - 1)
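+
+/*
+ * Illustrative mapping: hwirq 11 -> frame 11 >> NODMIS_SHIFT = 1,
+ * odmin 11 & NODMIS_MASK = 3, i.e. interrupt 3 of ODMI frame 1.
+ */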
+
+struct odmi_data {
+ struct resource res;
+ void __iomem *base;
+ unsigned int spi_base;
+};
+
+static struct odmi_data *odmis;
+static unsigned long *odmis_bm;
+static unsigned int odmis_count;
+
+/* Protects odmis_bm */
+static DEFINE_SPINLOCK(odmis_bm_lock);
+
+static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct odmi_data *odmi;
+ phys_addr_t addr;
+ unsigned int odmin;
+
+ if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
+ return;
+
+ odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
+ odmin = d->hwirq & NODMIS_MASK;
+
+ addr = odmi->res.start + GICP_ODMIN_SET;
+
+ msg->address_hi = upper_32_bits(addr);
+ msg->address_lo = lower_32_bits(addr);
+ msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
+}
+
+static struct irq_chip odmi_irq_chip = {
+ .name = "ODMI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = odmi_compose_msi_msg,
+};
+
+static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct odmi_data *odmi = NULL;
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ unsigned int hwirq, odmin;
+ int ret;
+
+ spin_lock(&odmis_bm_lock);
+ hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
+ if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
+ spin_unlock(&odmis_bm_lock);
+ return -ENOSPC;
+ }
+
+ __set_bit(hwirq, odmis_bm);
+ spin_unlock(&odmis_bm_lock);
+
+ odmi = &odmis[hwirq >> NODMIS_SHIFT];
+ odmin = hwirq & NODMIS_MASK;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = GIC_SPI;
+ fwspec.param[1] = odmi->spi_base - 32 + odmin;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret) {
+ pr_err("Cannot allocate parent IRQ\n");
+ spin_lock(&odmis_bm_lock);
+ __clear_bit(odmin, odmis_bm);
+ spin_unlock(&odmis_bm_lock);
+ return ret;
+ }
+
+ /* Configure the interrupt line to be edge */
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &odmi_irq_chip, NULL);
+
+ return 0;
+}
+
+static void odmi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
+		pr_err("Failed to tear down MSI. Invalid hwirq %lu\n", d->hwirq);
+ return;
+ }
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+ /* Actually free the MSI */
+ spin_lock(&odmis_bm_lock);
+ __clear_bit(d->hwirq, odmis_bm);
+ spin_unlock(&odmis_bm_lock);
+}
+
+static const struct irq_domain_ops odmi_domain_ops = {
+ .alloc = odmi_irq_domain_alloc,
+ .free = odmi_irq_domain_free,
+};
+
+static struct irq_chip odmi_msi_irq_chip = {
+ .name = "ODMI",
+};
+
+static struct msi_domain_ops odmi_msi_ops = {
+};
+
+static struct msi_domain_info odmi_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &odmi_msi_ops,
+ .chip = &odmi_msi_irq_chip,
+};
+
+static int __init mvebu_odmi_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *inner_domain, *plat_domain;
+ int ret, i;
+
+ if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
+ return -EINVAL;
+
+ odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
+ if (!odmis)
+ return -ENOMEM;
+
+ odmis_bm = bitmap_zalloc(odmis_count * NODMIS_PER_FRAME, GFP_KERNEL);
+ if (!odmis_bm) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (i = 0; i < odmis_count; i++) {
+ struct odmi_data *odmi = &odmis[i];
+
+ ret = of_address_to_resource(node, i, &odmi->res);
+ if (ret)
+ goto err_unmap;
+
+ odmi->base = of_io_request_and_map(node, i, "odmi");
+ if (IS_ERR(odmi->base)) {
+ ret = PTR_ERR(odmi->base);
+ goto err_unmap;
+ }
+
+ if (of_property_read_u32_index(node, "marvell,spi-base",
+ i, &odmi->spi_base)) {
+ ret = -EINVAL;
+ goto err_unmap;
+ }
+ }
+
+ inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ odmis_count * NODMIS_PER_FRAME,
+ &odmi_domain_ops, NULL);
+ if (!inner_domain) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ inner_domain->parent = irq_find_host(parent);
+
+ plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+ &odmi_msi_domain_info,
+ inner_domain);
+ if (!plat_domain) {
+ ret = -ENOMEM;
+ goto err_remove_inner;
+ }
+
+ return 0;
+
+err_remove_inner:
+ irq_domain_remove(inner_domain);
+err_unmap:
+ for (i = 0; i < odmis_count; i++) {
+ struct odmi_data *odmi = &odmis[i];
+
+ if (odmi->base && !IS_ERR(odmi->base))
+ iounmap(odmis[i].base);
+ }
+ bitmap_free(odmis_bm);
+err_alloc:
+ kfree(odmis);
+ return ret;
+}
+
+IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
new file mode 100644
index 000000000..ef3d3646c
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2016 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+#define PIC_CAUSE 0x0
+#define PIC_MASK 0x4
+
+#define PIC_MAX_IRQS 32
+#define PIC_MAX_IRQ_MASK ((1UL << PIC_MAX_IRQS) - 1)
+
+struct mvebu_pic {
+ void __iomem *base;
+ u32 parent_irq;
+ struct irq_domain *domain;
+ struct platform_device *pdev;
+};
+
+static void mvebu_pic_reset(struct mvebu_pic *pic)
+{
+ /* ACK and mask all interrupts */
+ writel(0, pic->base + PIC_MASK);
+ writel(PIC_MAX_IRQ_MASK, pic->base + PIC_CAUSE);
+}
+
+static void mvebu_pic_eoi_irq(struct irq_data *d)
+{
+ struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
+
+ writel(1 << d->hwirq, pic->base + PIC_CAUSE);
+}
+
+static void mvebu_pic_mask_irq(struct irq_data *d)
+{
+ struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
+ u32 reg;
+
+ reg = readl(pic->base + PIC_MASK);
+ reg |= (1 << d->hwirq);
+ writel(reg, pic->base + PIC_MASK);
+}
+
+static void mvebu_pic_unmask_irq(struct irq_data *d)
+{
+ struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
+ u32 reg;
+
+ reg = readl(pic->base + PIC_MASK);
+ reg &= ~(1 << d->hwirq);
+ writel(reg, pic->base + PIC_MASK);
+}
+
+static void mvebu_pic_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
+
+	seq_puts(p, dev_name(&pic->pdev->dev));
+}
+
+static const struct irq_chip mvebu_pic_chip = {
+ .irq_mask = mvebu_pic_mask_irq,
+ .irq_unmask = mvebu_pic_unmask_irq,
+ .irq_eoi = mvebu_pic_eoi_irq,
+ .irq_print_chip = mvebu_pic_print_chip,
+};
+
+static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct mvebu_pic *pic = domain->host_data;
+
+ irq_set_percpu_devid(virq);
+ irq_set_chip_data(virq, pic);
+ irq_set_chip_and_handler(virq, &mvebu_pic_chip, handle_percpu_devid_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mvebu_pic_domain_ops = {
+ .map = mvebu_pic_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void mvebu_pic_handle_cascade_irq(struct irq_desc *desc)
+{
+ struct mvebu_pic *pic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long irqmap, irqn;
+
+ irqmap = readl_relaxed(pic->base + PIC_CAUSE);
+ chained_irq_enter(chip, desc);
+
+ for_each_set_bit(irqn, &irqmap, BITS_PER_LONG)
+ generic_handle_domain_irq(pic->domain, irqn);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void mvebu_pic_enable_percpu_irq(void *data)
+{
+ struct mvebu_pic *pic = data;
+
+ mvebu_pic_reset(pic);
+ enable_percpu_irq(pic->parent_irq, IRQ_TYPE_NONE);
+}
+
+static void mvebu_pic_disable_percpu_irq(void *data)
+{
+ struct mvebu_pic *pic = data;
+
+ disable_percpu_irq(pic->parent_irq);
+}
+
+static int mvebu_pic_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct mvebu_pic *pic;
+
+ pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL);
+ if (!pic)
+ return -ENOMEM;
+
+ pic->pdev = pdev;
+ pic->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pic->base))
+ return PTR_ERR(pic->base);
+
+ pic->parent_irq = irq_of_parse_and_map(node, 0);
+ if (pic->parent_irq <= 0) {
+ dev_err(&pdev->dev, "Failed to parse parent interrupt\n");
+ return -EINVAL;
+ }
+
+ pic->domain = irq_domain_add_linear(node, PIC_MAX_IRQS,
+ &mvebu_pic_domain_ops, pic);
+ if (!pic->domain) {
+ dev_err(&pdev->dev, "Failed to allocate irq domain\n");
+ return -ENOMEM;
+ }
+
+ irq_set_chained_handler(pic->parent_irq, mvebu_pic_handle_cascade_irq);
+ irq_set_handler_data(pic->parent_irq, pic);
+
+ on_each_cpu(mvebu_pic_enable_percpu_irq, pic, 1);
+
+ platform_set_drvdata(pdev, pic);
+
+ return 0;
+}
+
+static int mvebu_pic_remove(struct platform_device *pdev)
+{
+ struct mvebu_pic *pic = platform_get_drvdata(pdev);
+
+ on_each_cpu(mvebu_pic_disable_percpu_irq, pic, 1);
+ irq_domain_remove(pic->domain);
+
+ return 0;
+}
+
+static const struct of_device_id mvebu_pic_of_match[] = {
+ { .compatible = "marvell,armada-8k-pic", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mvebu_pic_of_match);
+
+static struct platform_driver mvebu_pic_driver = {
+ .probe = mvebu_pic_probe,
+ .remove = mvebu_pic_remove,
+ .driver = {
+ .name = "mvebu-pic",
+ .of_match_table = mvebu_pic_of_match,
+ },
+};
+module_platform_driver(mvebu_pic_driver);
+
+MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mvebu_pic");
+
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
new file mode 100644
index 000000000..4ecef6d83
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "mvebu-sei: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/* Cause register */
+#define GICP_SECR(idx) (0x0 + ((idx) * 0x4))
+/* Mask register */
+#define GICP_SEMR(idx) (0x20 + ((idx) * 0x4))
+#define GICP_SET_SEI_OFFSET 0x30
+
+#define SEI_IRQ_COUNT_PER_REG 32
+#define SEI_IRQ_REG_COUNT 2
+#define SEI_IRQ_COUNT (SEI_IRQ_COUNT_PER_REG * SEI_IRQ_REG_COUNT)
+#define SEI_IRQ_REG_IDX(irq_id) ((irq_id) / SEI_IRQ_COUNT_PER_REG)
+#define SEI_IRQ_REG_BIT(irq_id) ((irq_id) % SEI_IRQ_COUNT_PER_REG)
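+
+/*
+ * For example (illustration only): hwirq 40 lives in cause/mask
+ * register index 40 / 32 = 1, bit 40 % 32 = 8.
+ */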
+
+struct mvebu_sei_interrupt_range {
+ u32 first;
+ u32 size;
+};
+
+struct mvebu_sei_caps {
+ struct mvebu_sei_interrupt_range ap_range;
+ struct mvebu_sei_interrupt_range cp_range;
+};
+
+struct mvebu_sei {
+ struct device *dev;
+ void __iomem *base;
+ struct resource *res;
+ struct irq_domain *sei_domain;
+ struct irq_domain *ap_domain;
+ struct irq_domain *cp_domain;
+ const struct mvebu_sei_caps *caps;
+
+ /* Lock on MSI allocations/releases */
+ struct mutex cp_msi_lock;
+ DECLARE_BITMAP(cp_msi_bitmap, SEI_IRQ_COUNT);
+
+ /* Lock on IRQ masking register */
+ raw_spinlock_t mask_lock;
+};
+
+static void mvebu_sei_ack_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+
+ writel_relaxed(BIT(SEI_IRQ_REG_BIT(d->hwirq)),
+ sei->base + GICP_SECR(reg_idx));
+}
+
+static void mvebu_sei_mask_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+ unsigned long flags;
+
+ /* 1 disables the interrupt */
+ raw_spin_lock_irqsave(&sei->mask_lock, flags);
+ reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
+ reg |= BIT(SEI_IRQ_REG_BIT(d->hwirq));
+ writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
+ raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
+}
+
+static void mvebu_sei_unmask_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+ unsigned long flags;
+
+ /* 0 enables the interrupt */
+ raw_spin_lock_irqsave(&sei->mask_lock, flags);
+ reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
+ reg &= ~BIT(SEI_IRQ_REG_BIT(d->hwirq));
+ writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
+ raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
+}
+
+static int mvebu_sei_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static int mvebu_sei_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ /* We can only clear the pending state by acking the interrupt */
+ if (which != IRQCHIP_STATE_PENDING || state)
+ return -EINVAL;
+
+ mvebu_sei_ack_irq(d);
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_irq_chip = {
+ .name = "SEI",
+ .irq_ack = mvebu_sei_ack_irq,
+ .irq_mask = mvebu_sei_mask_irq,
+ .irq_unmask = mvebu_sei_unmask_irq,
+ .irq_set_affinity = mvebu_sei_set_affinity,
+ .irq_set_irqchip_state = mvebu_sei_set_irqchip_state,
+};
+
+static int mvebu_sei_ap_set_type(struct irq_data *data, unsigned int type)
+{
+ if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_ap_irq_chip = {
+ .name = "AP SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = mvebu_sei_ap_set_type,
+};
+
+static void mvebu_sei_cp_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct mvebu_sei *sei = data->chip_data;
+ phys_addr_t set = sei->res->start + GICP_SET_SEI_OFFSET;
+
+ msg->data = data->hwirq + sei->caps->cp_range.first;
+ msg->address_lo = lower_32_bits(set);
+ msg->address_hi = upper_32_bits(set);
+}
+
+static int mvebu_sei_cp_set_type(struct irq_data *data, unsigned int type)
+{
+ if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_cp_irq_chip = {
+ .name = "CP SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = mvebu_sei_cp_set_type,
+ .irq_compose_msi_msg = mvebu_sei_cp_compose_msi_msg,
+};
+
+static int mvebu_sei_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+
+ /* Not much to do, just setup the irqdata */
+ irq_domain_set_hwirq_and_chip(domain, virq, fwspec->param[0],
+ &mvebu_sei_irq_chip, sei);
+
+ return 0;
+}
+
+static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops mvebu_sei_domain_ops = {
+ .alloc = mvebu_sei_domain_alloc,
+ .free = mvebu_sei_domain_free,
+};
+
+static int mvebu_sei_ap_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ *hwirq = fwspec->param[0];
+ *type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int mvebu_sei_ap_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec fwspec;
+ unsigned long hwirq;
+ unsigned int type;
+ int err;
+
+ mvebu_sei_ap_translate(domain, arg, &hwirq, &type);
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq + sei->caps->ap_range.first;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &mvebu_sei_ap_irq_chip, sei,
+ handle_level_irq, NULL, NULL);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mvebu_sei_ap_domain_ops = {
+ .translate = mvebu_sei_ap_translate,
+ .alloc = mvebu_sei_ap_alloc,
+ .free = irq_domain_free_irqs_parent,
+};
+
+static void mvebu_sei_cp_release_irq(struct mvebu_sei *sei, unsigned long hwirq)
+{
+ mutex_lock(&sei->cp_msi_lock);
+ clear_bit(hwirq, sei->cp_msi_bitmap);
+ mutex_unlock(&sei->cp_msi_lock);
+}
+
+static int mvebu_sei_cp_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec fwspec;
+ unsigned long hwirq;
+ int ret;
+
+ /* The software only supports single allocations for now */
+ if (nr_irqs != 1)
+ return -ENOTSUPP;
+
+ mutex_lock(&sei->cp_msi_lock);
+ hwirq = find_first_zero_bit(sei->cp_msi_bitmap,
+ sei->caps->cp_range.size);
+ if (hwirq < sei->caps->cp_range.size)
+ set_bit(hwirq, sei->cp_msi_bitmap);
+ mutex_unlock(&sei->cp_msi_lock);
+
+ if (hwirq == sei->caps->cp_range.size)
+ return -ENOSPC;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq + sei->caps->cp_range.first;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ goto free_irq;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &mvebu_sei_cp_irq_chip, sei,
+ handle_edge_irq, NULL, NULL);
+
+ return 0;
+
+free_irq:
+ mvebu_sei_cp_release_irq(sei, hwirq);
+ return ret;
+}
+
+static void mvebu_sei_cp_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ if (nr_irqs != 1 || d->hwirq >= sei->caps->cp_range.size) {
+ dev_err(sei->dev, "Invalid hwirq %lu\n", d->hwirq);
+ return;
+ }
+
+ mvebu_sei_cp_release_irq(sei, d->hwirq);
+ irq_domain_free_irqs_parent(domain, virq, 1);
+}
+
+static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
+ .alloc = mvebu_sei_cp_domain_alloc,
+ .free = mvebu_sei_cp_domain_free,
+};
+
+static struct irq_chip mvebu_sei_msi_irq_chip = {
+ .name = "SEI pMSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+};
+
+static struct msi_domain_ops mvebu_sei_msi_ops = {
+};
+
+static struct msi_domain_info mvebu_sei_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
+ .ops = &mvebu_sei_msi_ops,
+ .chip = &mvebu_sei_msi_irq_chip,
+};
+
+static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
+{
+ struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 idx;
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < SEI_IRQ_REG_COUNT; idx++) {
+ unsigned long irqmap;
+ int bit;
+
+ irqmap = readl_relaxed(sei->base + GICP_SECR(idx));
+ for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) {
+ unsigned long hwirq;
+ int err;
+
+ hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit;
+ err = generic_handle_domain_irq(sei->sei_domain, hwirq);
+ if (unlikely(err))
+ dev_warn(sei->dev, "Spurious IRQ detected (hwirq %lu)\n", hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void mvebu_sei_reset(struct mvebu_sei *sei)
+{
+ u32 reg_idx;
+
+ /* Clear IRQ cause registers, mask all interrupts */
+ for (reg_idx = 0; reg_idx < SEI_IRQ_REG_COUNT; reg_idx++) {
+ writel_relaxed(0xFFFFFFFF, sei->base + GICP_SECR(reg_idx));
+ writel_relaxed(0xFFFFFFFF, sei->base + GICP_SEMR(reg_idx));
+ }
+}
+
+static int mvebu_sei_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct irq_domain *plat_domain;
+ struct mvebu_sei *sei;
+ u32 parent_irq;
+ int ret;
+
+ sei = devm_kzalloc(&pdev->dev, sizeof(*sei), GFP_KERNEL);
+ if (!sei)
+ return -ENOMEM;
+
+ sei->dev = &pdev->dev;
+
+ mutex_init(&sei->cp_msi_lock);
+ raw_spin_lock_init(&sei->mask_lock);
+
+ sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sei->base = devm_ioremap_resource(sei->dev, sei->res);
+ if (IS_ERR(sei->base))
+ return PTR_ERR(sei->base);
+
+ /* Retrieve the SEI capabilities with the interrupt ranges */
+ sei->caps = of_device_get_match_data(&pdev->dev);
+ if (!sei->caps) {
+ dev_err(sei->dev,
+ "Could not retrieve controller capabilities\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Reserve the single (top-level) parent SPI IRQ from which all the
+ * interrupts handled by this driver will be signaled.
+ */
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (parent_irq <= 0) {
+ dev_err(sei->dev, "Failed to retrieve top-level SPI IRQ\n");
+ return -ENODEV;
+ }
+
+ /* Create the root SEI domain */
+ sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ (sei->caps->ap_range.size +
+ sei->caps->cp_range.size),
+ &mvebu_sei_domain_ops,
+ sei);
+ if (!sei->sei_domain) {
+ dev_err(sei->dev, "Failed to create SEI IRQ domain\n");
+ ret = -ENOMEM;
+ goto dispose_irq;
+ }
+
+ irq_domain_update_bus_token(sei->sei_domain, DOMAIN_BUS_NEXUS);
+
+ /* Create the 'wired' domain */
+ sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
+ sei->caps->ap_range.size,
+ of_node_to_fwnode(node),
+ &mvebu_sei_ap_domain_ops,
+ sei);
+ if (!sei->ap_domain) {
+ dev_err(sei->dev, "Failed to create AP IRQ domain\n");
+ ret = -ENOMEM;
+ goto remove_sei_domain;
+ }
+
+ irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED);
+
+ /* Create the 'MSI' domain */
+ sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
+ sei->caps->cp_range.size,
+ of_node_to_fwnode(node),
+ &mvebu_sei_cp_domain_ops,
+ sei);
+ if (!sei->cp_domain) {
+ pr_err("Failed to create CPs IRQ domain\n");
+ ret = -ENOMEM;
+ goto remove_ap_domain;
+ }
+
+ irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
+
+ plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+ &mvebu_sei_msi_domain_info,
+ sei->cp_domain);
+ if (!plat_domain) {
+ pr_err("Failed to create CPs MSI domain\n");
+ ret = -ENOMEM;
+ goto remove_cp_domain;
+ }
+
+ mvebu_sei_reset(sei);
+
+ irq_set_chained_handler_and_data(parent_irq,
+ mvebu_sei_handle_cascade_irq,
+ sei);
+
+ return 0;
+
+remove_cp_domain:
+ irq_domain_remove(sei->cp_domain);
+remove_ap_domain:
+ irq_domain_remove(sei->ap_domain);
+remove_sei_domain:
+ irq_domain_remove(sei->sei_domain);
+dispose_irq:
+ irq_dispose_mapping(parent_irq);
+
+ return ret;
+}
+
+static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
+ .ap_range = {
+ .first = 0,
+ .size = 21,
+ },
+ .cp_range = {
+ .first = 21,
+ .size = 43,
+ },
+};
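+
+/*
+ * Note: ap_range.size + cp_range.size = 21 + 43 = 64 = SEI_IRQ_COUNT,
+ * i.e. the wired (AP) and MSI (CP) ranges together cover the whole SEI
+ * interrupt space.
+ */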
+
+static const struct of_device_id mvebu_sei_of_match[] = {
+ {
+ .compatible = "marvell,ap806-sei",
+ .data = &mvebu_sei_ap806_caps,
+ },
+ {},
+};
+
+static struct platform_driver mvebu_sei_driver = {
+ .probe = mvebu_sei_probe,
+ .driver = {
+ .name = "mvebu-sei",
+ .of_match_table = mvebu_sei_of_match,
+ },
+};
+builtin_platform_driver(mvebu_sei_driver);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
new file mode 100644
index 000000000..55cb6b5a6
--- /dev/null
+++ b/drivers/irqchip/irq-mxs.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de>
+ * Add Alphascale ASM9260 support.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/stmp_device.h>
+#include <asm/exception.h>
+
+#include "alphascale_asm9260-icoll.h"
+
+/*
+ * This device provides 4 offsets for each register:
+ * 0x0 - plain read/write mode
+ * 0x4 - set mode (OR logic: writing 1 sets the bit).
+ * 0x8 - clear mode (writing 1 clears the bit).
+ * 0xc - toggle mode (XOR logic).
+ */
+#define SET_REG 4
+#define CLR_REG 8
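+
+/*
+ * For example, writing BM_ICOLL_INTR_ENABLE to (reg + SET_REG) sets the
+ * enable bit without a read-modify-write cycle, and writing the same
+ * value to (reg + CLR_REG) clears it, as done in the mask/unmask
+ * callbacks below.
+ */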
+
+#define HW_ICOLL_VECTOR 0x0000
+#define HW_ICOLL_LEVELACK 0x0010
+#define HW_ICOLL_CTRL 0x0020
+#define HW_ICOLL_STAT_OFFSET 0x0070
+#define HW_ICOLL_INTERRUPT0 0x0120
+#define HW_ICOLL_INTERRUPTn(n) ((n) * 0x10)
+#define BM_ICOLL_INTR_ENABLE BIT(2)
+#define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1
+
+#define ICOLL_NUM_IRQS 128
+
+enum icoll_type {
+ ICOLL,
+ ASM9260_ICOLL,
+};
+
+struct icoll_priv {
+ void __iomem *vector;
+ void __iomem *levelack;
+ void __iomem *ctrl;
+ void __iomem *stat;
+ void __iomem *intr;
+ void __iomem *clear;
+ enum icoll_type type;
+};
+
+static struct icoll_priv icoll_priv;
+static struct irq_domain *icoll_domain;
+
+/* calculate bit offset depending on the number of interrupts per register */
+static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
+{
+ /*
+	 * mask the lower part of hwirq to convert it
+	 * to 0, 1, 2 or 3 and then multiply it by 8 (i.e. shift left by 3)
+ */
+ return bit << ((d->hwirq & 3) << 3);
+}
+
+/* calculate mem offset depending on the number of interrupts per register */
+static void __iomem *icoll_intr_reg(struct irq_data *d)
+{
+ /* offset = hwirq / intr_per_reg * 0x10 */
+ return icoll_priv.intr + ((d->hwirq >> 2) * 0x10);
+}
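+
+/*
+ * Worked example (illustrative): on the ASM9260, hwirq 5 uses the
+ * register at intr + (5 >> 2) * 0x10 = intr + 0x10, and its enable bit
+ * is BM_ICOLL_INTR_ENABLE shifted left by (5 & 3) * 8 = 8 bits.
+ */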
+
+static void icoll_ack_irq(struct irq_data *d)
+{
+ /*
+ * The Interrupt Collector is able to prioritize irqs.
+ * Currently only level 0 is used. So acking can use
+ * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally.
+ */
+ __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0,
+ icoll_priv.levelack);
+}
+
+static void icoll_mask_irq(struct irq_data *d)
+{
+ __raw_writel(BM_ICOLL_INTR_ENABLE,
+ icoll_priv.intr + CLR_REG + HW_ICOLL_INTERRUPTn(d->hwirq));
+}
+
+static void icoll_unmask_irq(struct irq_data *d)
+{
+ __raw_writel(BM_ICOLL_INTR_ENABLE,
+ icoll_priv.intr + SET_REG + HW_ICOLL_INTERRUPTn(d->hwirq));
+}
+
+static void asm9260_mask_irq(struct irq_data *d)
+{
+ __raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE),
+ icoll_intr_reg(d) + CLR_REG);
+}
+
+static void asm9260_unmask_irq(struct irq_data *d)
+{
+ __raw_writel(ASM9260_BM_CLEAR_BIT(d->hwirq),
+ icoll_priv.clear +
+ ASM9260_HW_ICOLL_CLEARn(d->hwirq));
+
+ __raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE),
+ icoll_intr_reg(d) + SET_REG);
+}
+
+static struct irq_chip mxs_icoll_chip = {
+ .irq_ack = icoll_ack_irq,
+ .irq_mask = icoll_mask_irq,
+ .irq_unmask = icoll_unmask_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
+};
+
+static struct irq_chip asm9260_icoll_chip = {
+ .irq_ack = icoll_ack_irq,
+ .irq_mask = asm9260_mask_irq,
+ .irq_unmask = asm9260_unmask_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
+};
+
+asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
+{
+ u32 irqnr;
+
+ irqnr = __raw_readl(icoll_priv.stat);
+ __raw_writel(irqnr, icoll_priv.vector);
+ generic_handle_domain_irq(icoll_domain, irqnr);
+}
+
+static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip;
+
+ if (icoll_priv.type == ICOLL)
+ chip = &mxs_icoll_chip;
+ else
+ chip = &asm9260_icoll_chip;
+
+ irq_set_chip_and_handler(virq, chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops icoll_irq_domain_ops = {
+ .map = icoll_irq_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void __init icoll_add_domain(struct device_node *np,
+ int num)
+{
+ icoll_domain = irq_domain_add_linear(np, num,
+ &icoll_irq_domain_ops, NULL);
+
+ if (!icoll_domain)
+ panic("%pOF: unable to create irq domain", np);
+}
+
+static void __iomem * __init icoll_init_iobase(struct device_node *np)
+{
+ void __iomem *icoll_base;
+
+ icoll_base = of_io_request_and_map(np, 0, np->name);
+ if (IS_ERR(icoll_base))
+ panic("%pOF: unable to map resource", np);
+ return icoll_base;
+}
+
+static int __init icoll_of_init(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ void __iomem *icoll_base;
+
+ icoll_priv.type = ICOLL;
+
+ icoll_base = icoll_init_iobase(np);
+ icoll_priv.vector = icoll_base + HW_ICOLL_VECTOR;
+ icoll_priv.levelack = icoll_base + HW_ICOLL_LEVELACK;
+ icoll_priv.ctrl = icoll_base + HW_ICOLL_CTRL;
+ icoll_priv.stat = icoll_base + HW_ICOLL_STAT_OFFSET;
+ icoll_priv.intr = icoll_base + HW_ICOLL_INTERRUPT0;
+ icoll_priv.clear = NULL;
+
+ /*
+ * Interrupt Collector reset, which initializes the priority
+ * for each irq to level 0.
+ */
+ stmp_reset_block(icoll_priv.ctrl);
+
+ icoll_add_domain(np, ICOLL_NUM_IRQS);
+
+ return 0;
+}
+IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init);
+
+static int __init asm9260_of_init(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ void __iomem *icoll_base;
+ int i;
+
+ icoll_priv.type = ASM9260_ICOLL;
+
+ icoll_base = icoll_init_iobase(np);
+ icoll_priv.vector = icoll_base + ASM9260_HW_ICOLL_VECTOR;
+ icoll_priv.levelack = icoll_base + ASM9260_HW_ICOLL_LEVELACK;
+ icoll_priv.ctrl = icoll_base + ASM9260_HW_ICOLL_CTRL;
+ icoll_priv.stat = icoll_base + ASM9260_HW_ICOLL_STAT_OFFSET;
+ icoll_priv.intr = icoll_base + ASM9260_HW_ICOLL_INTERRUPT0;
+ icoll_priv.clear = icoll_base + ASM9260_HW_ICOLL_CLEAR0;
+
+ writel_relaxed(ASM9260_BM_CTRL_IRQ_ENABLE,
+ icoll_priv.ctrl);
+ /*
+	 * The ASM9260 doesn't provide a reset bit, so we need to set
+	 * level 0 manually.
+ */
+ for (i = 0; i < 16 * 0x10; i += 0x10)
+ writel(0, icoll_priv.intr + i);
+
+ icoll_add_domain(np, ASM9260_NUM_IRQS);
+ set_handle_irq(icoll_handle_irq);
+
+ return 0;
+}
+IRQCHIP_DECLARE(asm9260, "alphascale,asm9260-icoll", asm9260_of_init);
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
new file mode 100644
index 000000000..ba6332b00
--- /dev/null
+++ b/drivers/irqchip/irq-nvic.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/irqchip/irq-nvic.c
+ *
+ * Copyright (C) 2008 ARM Limited, All Rights Reserved.
+ * Copyright (C) 2013 Pengutronix
+ *
+ * Support for the Nested Vectored Interrupt Controller found on the
+ * ARMv7-M CPUs (Cortex-M3/M4)
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+
+#include <asm/v7m.h>
+#include <asm/exception.h>
+
+#define NVIC_ISER 0x000
+#define NVIC_ICER 0x080
+#define NVIC_IPR 0x400
+
+#define NVIC_MAX_BANKS 16
+/*
+ * Each bank handles 32 irqs, except the 16th (= last) bank, which
+ * handles only 16 irqs.
+ */
+#define NVIC_MAX_IRQ ((NVIC_MAX_BANKS - 1) * 32 + 16)
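+/* With 16 banks this evaluates to 15 * 32 + 16 = 496 interrupts. */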
+
+static struct irq_domain *nvic_irq_domain;
+
+static void __irq_entry nvic_handle_irq(struct pt_regs *regs)
+{
+ unsigned long icsr = readl_relaxed(BASEADDR_V7M_SCB + V7M_SCB_ICSR);
+ irq_hw_number_t hwirq = (icsr & V7M_SCB_ICSR_VECTACTIVE) - 16;
+
+ generic_handle_domain_irq(nvic_irq_domain, hwirq);
+}
+
+static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = arg;
+
+ ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_map_generic_chip(domain, virq + i, hwirq + i);
+
+ return 0;
+}
+
+static const struct irq_domain_ops nvic_irq_domain_ops = {
+ .translate = irq_domain_translate_onecell,
+ .alloc = nvic_irq_domain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
+static int __init nvic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ unsigned int irqs, i, ret, numbanks;
+ void __iomem *nvic_base;
+
+ numbanks = (readl_relaxed(V7M_SCS_ICTR) &
+ V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;
+
+ nvic_base = of_iomap(node, 0);
+ if (!nvic_base) {
+ pr_warn("unable to map nvic registers\n");
+ return -ENOMEM;
+ }
+
+ irqs = numbanks * 32;
+ if (irqs > NVIC_MAX_IRQ)
+ irqs = NVIC_MAX_IRQ;
+
+ nvic_irq_domain =
+ irq_domain_add_linear(node, irqs, &nvic_irq_domain_ops, NULL);
+
+ if (!nvic_irq_domain) {
+ pr_warn("Failed to allocate irq domain\n");
+ iounmap(nvic_base);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, 1,
+ "nvic_irq", handle_fasteoi_irq,
+ clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_warn("Failed to allocate irq chips\n");
+ irq_domain_remove(nvic_irq_domain);
+ iounmap(nvic_base);
+ return ret;
+ }
+
+ for (i = 0; i < numbanks; ++i) {
+ struct irq_chip_generic *gc;
+
+ gc = irq_get_domain_generic_chip(nvic_irq_domain, 32 * i);
+ gc->reg_base = nvic_base + 4 * i;
+ gc->chip_types[0].regs.enable = NVIC_ISER;
+ gc->chip_types[0].regs.disable = NVIC_ICER;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ /* This is a no-op as end of interrupt is signaled by the
+ * exception return sequence.
+ */
+ gc->chip_types[0].chip.irq_eoi = irq_gc_noop;
+
+ /* disable interrupts */
+ writel_relaxed(~0, gc->reg_base + NVIC_ICER);
+ }
+
+ /* Set priority on all interrupts */
+ for (i = 0; i < irqs; i += 4)
+ writel_relaxed(0, nvic_base + NVIC_IPR + i);
+
+ set_handle_irq(nvic_handle_irq);
+ return 0;
+}
+IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init);
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
new file mode 100644
index 000000000..dc82162ba
--- /dev/null
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -0,0 +1,394 @@
+/*
+ * linux/arch/arm/mach-omap2/irq.c
+ *
+ * Interrupt handler for OMAP2 boards.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <asm/exception.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/irqchip/irq-omap-intc.h>
+
+/* selected INTC register offsets */
+
+#define INTC_REVISION 0x0000
+#define INTC_SYSCONFIG 0x0010
+#define INTC_SYSSTATUS 0x0014
+#define INTC_SIR 0x0040
+#define INTC_CONTROL 0x0048
+#define INTC_PROTECTION 0x004C
+#define INTC_IDLE 0x0050
+#define INTC_THRESHOLD 0x0068
+#define INTC_MIR0 0x0084
+#define INTC_MIR_CLEAR0 0x0088
+#define INTC_MIR_SET0 0x008c
+#define INTC_PENDING_IRQ0 0x0098
+#define INTC_PENDING_IRQ1 0x00b8
+#define INTC_PENDING_IRQ2 0x00d8
+#define INTC_PENDING_IRQ3 0x00f8
+#define INTC_ILR0 0x0100
+
+#define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
+#define SPURIOUSIRQ_MASK (0x1ffffff << 7)
+#define INTCPS_NR_ILR_REGS 128
+#define INTCPS_NR_MIR_REGS 4
+
+#define INTC_IDLE_FUNCIDLE (1 << 0)
+#define INTC_IDLE_TURBO (1 << 1)
+
+#define INTC_PROTECTION_ENABLE (1 << 0)
+
+struct omap_intc_regs {
+ u32 sysconfig;
+ u32 protection;
+ u32 idle;
+ u32 threshold;
+ u32 ilr[INTCPS_NR_ILR_REGS];
+ u32 mir[INTCPS_NR_MIR_REGS];
+};
+static struct omap_intc_regs intc_context;
+
+static struct irq_domain *domain;
+static void __iomem *omap_irq_base;
+static int omap_nr_pending;
+static int omap_nr_irqs;
+
+static void intc_writel(u32 reg, u32 val)
+{
+ writel_relaxed(val, omap_irq_base + reg);
+}
+
+static u32 intc_readl(u32 reg)
+{
+ return readl_relaxed(omap_irq_base + reg);
+}
+
+void omap_intc_save_context(void)
+{
+ int i;
+
+ intc_context.sysconfig =
+ intc_readl(INTC_SYSCONFIG);
+ intc_context.protection =
+ intc_readl(INTC_PROTECTION);
+ intc_context.idle =
+ intc_readl(INTC_IDLE);
+ intc_context.threshold =
+ intc_readl(INTC_THRESHOLD);
+
+ for (i = 0; i < omap_nr_irqs; i++)
+ intc_context.ilr[i] =
+ intc_readl((INTC_ILR0 + 0x4 * i));
+ for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
+ intc_context.mir[i] =
+ intc_readl(INTC_MIR0 + (0x20 * i));
+}
+
+void omap_intc_restore_context(void)
+{
+ int i;
+
+ intc_writel(INTC_SYSCONFIG, intc_context.sysconfig);
+ intc_writel(INTC_PROTECTION, intc_context.protection);
+ intc_writel(INTC_IDLE, intc_context.idle);
+ intc_writel(INTC_THRESHOLD, intc_context.threshold);
+
+ for (i = 0; i < omap_nr_irqs; i++)
+ intc_writel(INTC_ILR0 + 0x4 * i,
+ intc_context.ilr[i]);
+
+ for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
+ intc_writel(INTC_MIR0 + 0x20 * i,
+ intc_context.mir[i]);
+	/* MIRs are saved and restored with other PRCM registers */
+}
+
+void omap3_intc_prepare_idle(void)
+{
+ /*
+	 * Disable autoidle as it can stall the interrupt controller,
+ * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
+ */
+ intc_writel(INTC_SYSCONFIG, 0);
+ intc_writel(INTC_IDLE, INTC_IDLE_TURBO);
+}
+
+void omap3_intc_resume_idle(void)
+{
+ /* Re-enable autoidle */
+ intc_writel(INTC_SYSCONFIG, 1);
+ intc_writel(INTC_IDLE, 0);
+}
+
+/* XXX: FIQ and additional INTC support (only MPU at the moment) */
+static void omap_ack_irq(struct irq_data *d)
+{
+ intc_writel(INTC_CONTROL, 0x1);
+}
+
+static void omap_mask_ack_irq(struct irq_data *d)
+{
+ irq_gc_mask_disable_reg(d);
+ omap_ack_irq(d);
+}
+
+static void __init omap_irq_soft_reset(void)
+{
+ unsigned long tmp;
+
+ tmp = intc_readl(INTC_REVISION) & 0xff;
+
+ pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n",
+ omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs);
+
+ tmp = intc_readl(INTC_SYSCONFIG);
+ tmp |= 1 << 1; /* soft reset */
+ intc_writel(INTC_SYSCONFIG, tmp);
+
+ while (!(intc_readl(INTC_SYSSTATUS) & 0x1))
+ /* Wait for reset to complete */;
+
+ /* Enable autoidle */
+ intc_writel(INTC_SYSCONFIG, 1 << 0);
+}
+
+int omap_irq_pending(void)
+{
+ int i;
+
+ for (i = 0; i < omap_nr_pending; i++)
+ if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)))
+ return 1;
+ return 0;
+}
+
+void omap3_intc_suspend(void)
+{
+ /* A pending interrupt would prevent OMAP from entering suspend */
+ omap_ack_irq(NULL);
+}
+
+static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
+{
+ int ret;
+ int i;
+
+ ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
+ handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
+ IRQ_LEVEL, 0);
+ if (ret) {
+ pr_warn("Failed to allocate irq chips\n");
+ return ret;
+ }
+
+ for (i = 0; i < omap_nr_pending; i++) {
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_get_domain_generic_chip(d, 32 * i);
+ gc->reg_base = base;
+ ct = gc->chip_types;
+
+ ct->type = IRQ_TYPE_LEVEL_MASK;
+
+ ct->chip.irq_ack = omap_mask_ack_irq;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+
+ ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
+
+ ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
+ ct->regs.disable = INTC_MIR_SET0 + 32 * i;
+ }
+
+ return 0;
+}
+
+static void __init omap_alloc_gc_legacy(void __iomem *base,
+ unsigned int irq_start, unsigned int num)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("INTC", 1, irq_start, base,
+ handle_level_irq);
+ ct = gc->chip_types;
+ ct->chip.irq_ack = omap_mask_ack_irq;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
+
+ ct->regs.enable = INTC_MIR_CLEAR0;
+ ct->regs.disable = INTC_MIR_SET0;
+ irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+}
+
+static int __init omap_init_irq_of(struct device_node *node)
+{
+ int ret;
+
+ omap_irq_base = of_iomap(node, 0);
+ if (WARN_ON(!omap_irq_base))
+ return -ENOMEM;
+
+ domain = irq_domain_add_linear(node, omap_nr_irqs,
+ &irq_generic_chip_ops, NULL);
+
+ omap_irq_soft_reset();
+
+ ret = omap_alloc_gc_of(domain, omap_irq_base);
+ if (ret < 0)
+ irq_domain_remove(domain);
+
+ return ret;
+}
+
+static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
+{
+ int j, irq_base;
+
+ omap_irq_base = ioremap(base, SZ_4K);
+ if (WARN_ON(!omap_irq_base))
+ return -ENOMEM;
+
+ irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
+ if (irq_base < 0) {
+ pr_warn("Couldn't allocate IRQ numbers\n");
+ irq_base = 0;
+ }
+
+ domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+
+ omap_irq_soft_reset();
+
+ for (j = 0; j < omap_nr_irqs; j += 32)
+ omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);
+
+ return 0;
+}
+
+static void __init omap_irq_enable_protection(void)
+{
+ u32 reg;
+
+ reg = intc_readl(INTC_PROTECTION);
+ reg |= INTC_PROTECTION_ENABLE;
+ intc_writel(INTC_PROTECTION, reg);
+}
+
+static int __init omap_init_irq(u32 base, struct device_node *node)
+{
+ int ret;
+
+ /*
+	 * FIXME: the legacy OMAP DMA driver sitting under
+	 * arch/arm/plat-omap/dma.c is still not ready for linear IRQ domains;
+	 * because of that we need to temporarily "blacklist" OMAP2 and OMAP3
+	 * devices from using linear IRQ domains until that driver is fixed.
+ */
+ if (of_device_is_compatible(node, "ti,omap2-intc") ||
+ of_device_is_compatible(node, "ti,omap3-intc")) {
+ struct resource res;
+
+ if (of_address_to_resource(node, 0, &res))
+ return -ENOMEM;
+
+ base = res.start;
+ ret = omap_init_irq_legacy(base, node);
+ } else if (node) {
+ ret = omap_init_irq_of(node);
+ } else {
+ ret = omap_init_irq_legacy(base, NULL);
+ }
+
+ if (ret == 0)
+ omap_irq_enable_protection();
+
+ return ret;
+}
+
+static asmlinkage void __exception_irq_entry
+omap_intc_handle_irq(struct pt_regs *regs)
+{
+ extern unsigned long irq_err_count;
+ u32 irqnr;
+
+ irqnr = intc_readl(INTC_SIR);
+
+ /*
+	 * A spurious IRQ can result if the interrupt that triggered the
+	 * sorting is no longer active during the sorting (10 INTC
+	 * functional clock cycles after interrupt assertion), or if a
+	 * change in the interrupt mask affected the result during sorting
+	 * time. There is no special handling required except ignoring
+	 * the SIR register value just read and retrying.
+	 * See section 6.2.5 of the AM335x TRM (Literature Number: SPRUH73K).
+	 *
+	 * Many times, a spurious interrupt situation has been fixed by
+	 * adding a flush for the posted write acking the IRQ in the
+	 * device driver. Typically, this is going to be the device
+	 * driver whose interrupt was handled just before the spurious
+	 * IRQ occurred. Pay attention to those device drivers if you
+	 * run into the spurious IRQ condition below.
+ */
+ if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
+ pr_err_once("%s: spurious irq!\n", __func__);
+ irq_err_count++;
+ omap_ack_irq(NULL);
+ return;
+ }
+
+ irqnr &= ACTIVEIRQ_MASK;
+ generic_handle_domain_irq(domain, irqnr);
+}
+
+static int __init intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+
+ omap_nr_pending = 3;
+ omap_nr_irqs = 96;
+
+ if (WARN_ON(!node))
+ return -ENODEV;
+
+ if (of_device_is_compatible(node, "ti,dm814-intc") ||
+ of_device_is_compatible(node, "ti,dm816-intc") ||
+ of_device_is_compatible(node, "ti,am33xx-intc")) {
+ omap_nr_irqs = 128;
+ omap_nr_pending = 4;
+ }
+
+ ret = omap_init_irq(-1, of_node_get(node));
+ if (ret < 0)
+ return ret;
+
+ set_handle_irq(omap_intc_handle_irq);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
+IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
+IRQCHIP_DECLARE(dm814x_intc, "ti,dm814-intc", intc_of_init);
+IRQCHIP_DECLARE(dm816x_intc, "ti,dm816-intc", intc_of_init);
+IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
new file mode 100644
index 000000000..e66ef4373
--- /dev/null
+++ b/drivers/irqchip/irq-ompic.c
@@ -0,0 +1,202 @@
+/*
+ * Open Multi-Processor Interrupt Controller driver
+ *
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The ompic device handles IPI communication between cores in multi-core
+ * OpenRISC systems.
+ *
+ * Registers
+ *
+ * For each CPU the ompic has 2 registers. The control register for sending
+ * and acking IPIs and the status register for receiving IPIs. The register
+ * layouts are as follows:
+ *
+ * Control register
+ * +---------+---------+----------+---------+
+ * | 31 | 30 | 29 .. 16 | 15 .. 0 |
+ * ----------+---------+----------+----------
+ * | IRQ ACK | IRQ GEN | DST CORE | DATA |
+ * +---------+---------+----------+---------+
+ *
+ * Status register
+ * +----------+-------------+----------+---------+
+ * | 31 | 30 | 29 .. 16 | 15 .. 0 |
+ * -----------+-------------+----------+---------+
+ * | Reserved | IRQ Pending | SRC CORE | DATA |
+ * +----------+-------------+----------+---------+
+ *
+ * Architecture
+ *
+ * - The ompic generates a level interrupt to the CPU PIC when a message is
+ * ready. Messages are delivered via the memory bus.
+ * - The ompic does not have any interrupt input lines.
+ * - The ompic is wired to the same irq line on each core.
+ * - Devices are wired to the same irq line on each core.
+ *
+ * +---------+ +---------+
+ * | CPU | | CPU |
+ * | Core 0 |<==\ (memory access) /==>| Core 1 |
+ * | [ PIC ]| | | | [ PIC ]|
+ * +----^-^--+ | | +----^-^--+
+ * | | v v | |
+ * <====|=|=================================|=|==> (memory bus)
+ * | | ^ ^ | |
+ * (ipi | +------|---------+--------|-------|-+ (device irq)
+ * irq | | | | |
+ * core0)| +------|---------|--------|-------+ (ipi irq core1)
+ * | | | | |
+ * +----o-o-+ | +--------+ |
+ * | ompic |<===/ | Device |<===/
+ * | IPI | +--------+
+ * +--------+*
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <linux/irqchip.h>
+
+#define OMPIC_CPUBYTES 8
+#define OMPIC_CTRL(cpu) (0x0 + (cpu * OMPIC_CPUBYTES))
+#define OMPIC_STAT(cpu) (0x4 + (cpu * OMPIC_CPUBYTES))
+
+#define OMPIC_CTRL_IRQ_ACK (1 << 31)
+#define OMPIC_CTRL_IRQ_GEN (1 << 30)
+#define OMPIC_CTRL_DST(cpu) (((cpu) & 0x3fff) << 16)
+
+#define OMPIC_STAT_IRQ_PENDING (1 << 30)
+
+#define OMPIC_DATA(x) ((x) & 0xffff)
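+
+/*
+ * For illustration: to raise IPI message 1 on CPU 2, the code below
+ * writes OMPIC_CTRL_IRQ_GEN | OMPIC_CTRL_DST(2) | OMPIC_DATA(1) to the
+ * sender's own control register; the receiver acks by writing
+ * OMPIC_CTRL_IRQ_ACK to its control register.
+ */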
+
+DEFINE_PER_CPU(unsigned long, ops);
+
+static void __iomem *ompic_base;
+
+static inline u32 ompic_readreg(void __iomem *base, loff_t offset)
+{
+ return ioread32be(base + offset);
+}
+
+static void ompic_writereg(void __iomem *base, loff_t offset, u32 data)
+{
+ iowrite32be(data, base + offset);
+}
+
+static void ompic_raise_softirq(const struct cpumask *mask,
+ unsigned int ipi_msg)
+{
+ unsigned int dst_cpu;
+ unsigned int src_cpu = smp_processor_id();
+
+ for_each_cpu(dst_cpu, mask) {
+ set_bit(ipi_msg, &per_cpu(ops, dst_cpu));
+
+ /*
+ * On OpenRISC the atomic set_bit() call implies a memory
+ * barrier. Otherwise we would need: smp_wmb(); paired
+ * with the read in ompic_ipi_handler.
+ */
+
+ ompic_writereg(ompic_base, OMPIC_CTRL(src_cpu),
+ OMPIC_CTRL_IRQ_GEN |
+ OMPIC_CTRL_DST(dst_cpu) |
+ OMPIC_DATA(1));
+ }
+}
+
+static irqreturn_t ompic_ipi_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long *pending_ops = &per_cpu(ops, cpu);
+ unsigned long ops;
+
+ ompic_writereg(ompic_base, OMPIC_CTRL(cpu), OMPIC_CTRL_IRQ_ACK);
+ while ((ops = xchg(pending_ops, 0)) != 0) {
+
+ /*
+ * On OpenRISC the atomic xchg() call implies a memory
+ * barrier. Otherwise we may need an smp_rmb(); paired
+ * with the write in ompic_raise_softirq.
+ */
+
+ do {
+ unsigned long ipi_msg;
+
+ ipi_msg = __ffs(ops);
+ ops &= ~(1UL << ipi_msg);
+
+ handle_IPI(ipi_msg);
+ } while (ops);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init ompic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ int irq;
+ int ret;
+
+ /* Validate the DT */
+ if (ompic_base) {
+		pr_err("ompic: duplicate ompics are not supported");
+ return -EEXIST;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("ompic: reg property requires an address and size");
+ return -EINVAL;
+ }
+
+ if (resource_size(&res) < (num_possible_cpus() * OMPIC_CPUBYTES)) {
+ pr_err("ompic: reg size, currently %d must be at least %d",
+ resource_size(&res),
+ (num_possible_cpus() * OMPIC_CPUBYTES));
+ return -EINVAL;
+ }
+
+ /* Setup the device */
+ ompic_base = ioremap(res.start, resource_size(&res));
+ if (!ompic_base) {
+ pr_err("ompic: unable to map registers");
+ return -ENOMEM;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ pr_err("ompic: unable to parse device irq");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ ret = request_irq(irq, ompic_ipi_handler, IRQF_PERCPU,
+ "ompic_ipi", NULL);
+ if (ret)
+ goto out_irq_disp;
+
+ set_smp_cross_call(ompic_raise_softirq);
+
+ return 0;
+
+out_irq_disp:
+ irq_dispose_mapping(irq);
+out_unmap:
+ iounmap(ompic_base);
+ ompic_base = NULL;
+ return ret;
+}
+IRQCHIP_DECLARE(ompic, "openrisc,ompic", ompic_of_init);
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
new file mode 100644
index 000000000..f289ccd95
--- /dev/null
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ */
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+/* OR1K PIC implementation */
+
+struct or1k_pic_dev {
+ struct irq_chip chip;
+ irq_flow_handler_t handle;
+ unsigned long flags;
+};
+
+/*
+ * We're a couple of cycles faster than the generic implementations with
+ * these 'fast' versions.
+ */
+
+static void or1k_pic_mask(struct irq_data *data)
+{
+ mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
+}
+
+static void or1k_pic_unmask(struct irq_data *data)
+{
+ mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->hwirq));
+}
+
+static void or1k_pic_ack(struct irq_data *data)
+{
+ mtspr(SPR_PICSR, (1UL << data->hwirq));
+}
+
+static void or1k_pic_mask_ack(struct irq_data *data)
+{
+ mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
+ mtspr(SPR_PICSR, (1UL << data->hwirq));
+}
+
+/*
+ * There are two oddities with the OR1200 PIC implementation:
+ * i) LEVEL-triggered interrupts are latched and need to be cleared
+ * ii) the interrupt latch is cleared by writing a 0 to the bit,
+ * as opposed to a 1 as mandated by the spec
+ */
+static void or1k_pic_or1200_ack(struct irq_data *data)
+{
+ mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->hwirq));
+}
+
+static void or1k_pic_or1200_mask_ack(struct irq_data *data)
+{
+ mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
+ mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->hwirq));
+}
+
+static struct or1k_pic_dev or1k_pic_level = {
+ .chip = {
+ .name = "or1k-PIC-level",
+ .irq_unmask = or1k_pic_unmask,
+ .irq_mask = or1k_pic_mask,
+ },
+ .handle = handle_level_irq,
+ .flags = IRQ_LEVEL | IRQ_NOPROBE,
+};
+
+static struct or1k_pic_dev or1k_pic_edge = {
+ .chip = {
+ .name = "or1k-PIC-edge",
+ .irq_unmask = or1k_pic_unmask,
+ .irq_mask = or1k_pic_mask,
+ .irq_ack = or1k_pic_ack,
+ .irq_mask_ack = or1k_pic_mask_ack,
+ },
+ .handle = handle_edge_irq,
+ .flags = IRQ_LEVEL | IRQ_NOPROBE,
+};
+
+static struct or1k_pic_dev or1k_pic_or1200 = {
+ .chip = {
+ .name = "or1200-PIC",
+ .irq_unmask = or1k_pic_unmask,
+ .irq_mask = or1k_pic_mask,
+ .irq_ack = or1k_pic_or1200_ack,
+ .irq_mask_ack = or1k_pic_or1200_mask_ack,
+ },
+ .handle = handle_level_irq,
+ .flags = IRQ_LEVEL | IRQ_NOPROBE,
+};
+
+static struct irq_domain *root_domain;
+
+static inline int pic_get_irq(int first)
+{
+ int hwirq;
+
+ hwirq = ffs(mfspr(SPR_PICSR) >> first);
+ if (!hwirq)
+ return NO_IRQ;
+ else
+ hwirq = hwirq + first - 1;
+
+ return hwirq;
+}
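+
+/*
+ * Worked example (values are illustrative only): with SPR_PICSR holding
+ * 0x14 (lines 2 and 4 pending), pic_get_irq(0) computes ffs(0x14) == 3 and
+ * returns hwirq 2; the follow-up call pic_get_irq(3) computes ffs(0x02) == 2
+ * and returns hwirq 4. This is how or1k_pic_handle_irq() below walks every
+ * pending line in one pass.
+ */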
+
+static void or1k_pic_handle_irq(struct pt_regs *regs)
+{
+ int irq = -1;
+
+ while ((irq = pic_get_irq(irq + 1)) != NO_IRQ)
+ generic_handle_domain_irq(root_domain, irq);
+}
+
+static int or1k_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ struct or1k_pic_dev *pic = d->host_data;
+
+ irq_set_chip_and_handler(irq, &pic->chip, pic->handle);
+ irq_set_status_flags(irq, pic->flags);
+
+ return 0;
+}
+
+static const struct irq_domain_ops or1k_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = or1k_map,
+};
+
+/*
+ * This sets up the IRQ domain for the PIC built into the OpenRISC
+ * 1000 CPU. This is the "root" domain as these are the interrupts
+ * that directly trigger an exception in the CPU.
+ */
+static int __init or1k_pic_init(struct device_node *node,
+ struct or1k_pic_dev *pic)
+{
+ /* Disable all interrupts until explicitly requested */
+ mtspr(SPR_PICMR, (0UL));
+
+ root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops,
+ pic);
+
+ set_handle_irq(or1k_pic_handle_irq);
+
+ return 0;
+}
+
+static int __init or1k_pic_or1200_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return or1k_pic_init(node, &or1k_pic_or1200);
+}
+IRQCHIP_DECLARE(or1k_pic_or1200, "opencores,or1200-pic", or1k_pic_or1200_init);
+IRQCHIP_DECLARE(or1k_pic, "opencores,or1k-pic", or1k_pic_or1200_init);
+
+static int __init or1k_pic_level_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return or1k_pic_init(node, &or1k_pic_level);
+}
+IRQCHIP_DECLARE(or1k_pic_level, "opencores,or1k-pic-level",
+ or1k_pic_level_init);
+
+static int __init or1k_pic_edge_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return or1k_pic_init(node, &or1k_pic_edge);
+}
+IRQCHIP_DECLARE(or1k_pic_edge, "opencores,or1k-pic-edge", or1k_pic_edge_init);
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
new file mode 100644
index 000000000..17c2c7a07
--- /dev/null
+++ b/drivers/irqchip/irq-orion.c
@@ -0,0 +1,206 @@
+/*
+ * Marvell Orion SoCs IRQ chip driver.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+/*
+ * Orion SoC main interrupt controller
+ */
+#define ORION_IRQS_PER_CHIP 32
+
+#define ORION_IRQ_CAUSE 0x00
+#define ORION_IRQ_MASK 0x04
+#define ORION_IRQ_FIQ_MASK 0x08
+#define ORION_IRQ_ENDP_MASK 0x0c
+
+static struct irq_domain *orion_irq_domain;
+
+static void
+__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
+{
+ struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
+ int n, base = 0;
+
+ for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
+ struct irq_chip_generic *gc =
+ irq_get_domain_generic_chip(orion_irq_domain, base);
+ u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
+ gc->mask_cache;
+ while (stat) {
+ u32 hwirq = __fls(stat);
+ generic_handle_domain_irq(orion_irq_domain,
+ gc->irq_base + hwirq);
+ stat &= ~(1 << hwirq);
+ }
+ }
+}
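+
+/*
+ * Worked example (arbitrary values): if gc->mask_cache is 0x0000000c and the
+ * CAUSE register reads 0x0000000a, stat becomes 0x00000008, __fls(stat) is 3
+ * and only hwirq 3 of that 32-interrupt window is handled; the masked-out
+ * bit 1 never reaches generic_handle_domain_irq().
+ */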
+
+static int __init orion_irq_init(struct device_node *np,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ int n, ret, base, num_chips = 0;
+ struct resource r;
+
+ /* count number of irq chips by valid reg addresses */
+ while (of_address_to_resource(np, num_chips, &r) == 0)
+ num_chips++;
+
+ orion_irq_domain = irq_domain_add_linear(np,
+ num_chips * ORION_IRQS_PER_CHIP,
+ &irq_generic_chip_ops, NULL);
+ if (!orion_irq_domain)
+ panic("%pOFn: unable to add irq domain\n", np);
+
+ ret = irq_alloc_domain_generic_chips(orion_irq_domain,
+ ORION_IRQS_PER_CHIP, 1, np->full_name,
+ handle_level_irq, clr, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret)
+ panic("%pOFn: unable to alloc irq domain gc\n", np);
+
+ for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
+ struct irq_chip_generic *gc =
+ irq_get_domain_generic_chip(orion_irq_domain, base);
+
+ of_address_to_resource(np, n, &r);
+
+ if (!request_mem_region(r.start, resource_size(&r), np->name))
+ panic("%pOFn: unable to request mem region %d",
+ np, n);
+
+ gc->reg_base = ioremap(r.start, resource_size(&r));
+ if (!gc->reg_base)
+ panic("%pOFn: unable to map resource %d", np, n);
+
+ gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+ /* mask all interrupts */
+ writel(0, gc->reg_base + ORION_IRQ_MASK);
+ }
+
+ set_handle_irq(orion_handle_irq);
+ return 0;
+}
+IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
+
+/*
+ * Orion SoC bridge interrupt controller
+ */
+#define ORION_BRIDGE_IRQ_CAUSE 0x00
+#define ORION_BRIDGE_IRQ_MASK 0x04
+
+static void orion_bridge_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
+
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
+ u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
+ gc->mask_cache;
+
+ while (stat) {
+ u32 hwirq = __fls(stat);
+
+ generic_handle_domain_irq(d, gc->irq_base + hwirq);
+ stat &= ~(1 << hwirq);
+ }
+}
+
+/*
+ * Bridge IRQ_CAUSE is asserted regardless of the IRQ_MASK register.
+ * To avoid interrupt events on stale irqs, we clear them before unmasking.
+ */
+static unsigned int orion_bridge_irq_startup(struct irq_data *d)
+{
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
+ ct->chip.irq_ack(d);
+ ct->chip.irq_unmask(d);
+ return 0;
+}
+
+static int __init orion_bridge_irq_init(struct device_node *np,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct resource r;
+ struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ int ret, irq, nrirqs = 32;
+
+ /* get optional number of interrupts provided */
+ of_property_read_u32(np, "marvell,#interrupts", &nrirqs);
+
+ domain = irq_domain_add_linear(np, nrirqs,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("%pOFn: unable to add irq domain\n", np);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
+ handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%pOFn: unable to alloc irq domain gc\n", np);
+ return ret;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret) {
+ pr_err("%pOFn: unable to get resource\n", np);
+ return ret;
+ }
+
+ if (!request_mem_region(r.start, resource_size(&r), np->name)) {
+ pr_err("%s: unable to request mem region\n", np->name);
+ return -ENOMEM;
+ }
+
+ /* Map the parent interrupt for the chained handler */
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("%pOFn: unable to parse irq\n", np);
+ return -EINVAL;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = ioremap(r.start, resource_size(&r));
+ if (!gc->reg_base) {
+ pr_err("%pOFn: unable to map resource\n", np);
+ return -ENOMEM;
+ }
+
+ gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
+ gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
+ gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+ /* mask and clear all interrupts */
+ writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
+ writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
+
+ irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
+ domain);
+
+ return 0;
+}
+IRQCHIP_DECLARE(orion_bridge_intc,
+ "marvell,orion-bridge-intc", orion_bridge_irq_init);
diff --git a/drivers/irqchip/irq-owl-sirq.c b/drivers/irqchip/irq-owl-sirq.c
new file mode 100644
index 000000000..6e4127465
--- /dev/null
+++ b/drivers/irqchip/irq-owl-sirq.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Actions Semi Owl SoCs SIRQ interrupt controller driver
+ *
+ * Copyright (C) 2014 Actions Semi Inc.
+ * David Liu <liuwei@actions-semi.com>
+ *
+ * Author: Parthiban Nallathambi <pn@denx.de>
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ * Author: Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define NUM_SIRQ 3
+
+#define INTC_EXTCTL_PENDING BIT(0)
+#define INTC_EXTCTL_CLK_SEL BIT(4)
+#define INTC_EXTCTL_EN BIT(5)
+#define INTC_EXTCTL_TYPE_MASK GENMASK(7, 6)
+#define INTC_EXTCTL_TYPE_HIGH 0
+#define INTC_EXTCTL_TYPE_LOW BIT(6)
+#define INTC_EXTCTL_TYPE_RISING BIT(7)
+#define INTC_EXTCTL_TYPE_FALLING (BIT(6) | BIT(7))
+
+/* S500 & S700 SIRQ control register masks */
+#define INTC_EXTCTL_SIRQ0_MASK GENMASK(23, 16)
+#define INTC_EXTCTL_SIRQ1_MASK GENMASK(15, 8)
+#define INTC_EXTCTL_SIRQ2_MASK GENMASK(7, 0)
+
+/* S900 SIRQ control register offsets, relative to controller base address */
+#define INTC_EXTCTL0 0x0000
+#define INTC_EXTCTL1 0x0328
+#define INTC_EXTCTL2 0x032c
+
+struct owl_sirq_params {
+ /* INTC_EXTCTL reg shared for all three SIRQ lines */
+ bool reg_shared;
+ /* INTC_EXTCTL reg offsets relative to controller base address */
+ u16 reg_offset[NUM_SIRQ];
+};
+
+struct owl_sirq_chip_data {
+ const struct owl_sirq_params *params;
+ void __iomem *base;
+ raw_spinlock_t lock;
+ u32 ext_irqs[NUM_SIRQ];
+};
+
+/* S500 & S700 SoCs */
+static const struct owl_sirq_params owl_sirq_s500_params = {
+ .reg_shared = true,
+ .reg_offset = { 0, 0, 0 },
+};
+
+/* S900 SoC */
+static const struct owl_sirq_params owl_sirq_s900_params = {
+ .reg_shared = false,
+ .reg_offset = { INTC_EXTCTL0, INTC_EXTCTL1, INTC_EXTCTL2 },
+};
+
+static u32 owl_field_get(u32 val, u32 index)
+{
+ switch (index) {
+ case 0:
+ return FIELD_GET(INTC_EXTCTL_SIRQ0_MASK, val);
+ case 1:
+ return FIELD_GET(INTC_EXTCTL_SIRQ1_MASK, val);
+ case 2:
+ default:
+ return FIELD_GET(INTC_EXTCTL_SIRQ2_MASK, val);
+ }
+}
+
+static u32 owl_field_prep(u32 val, u32 index)
+{
+ switch (index) {
+ case 0:
+ return FIELD_PREP(INTC_EXTCTL_SIRQ0_MASK, val);
+ case 1:
+ return FIELD_PREP(INTC_EXTCTL_SIRQ1_MASK, val);
+ case 2:
+ default:
+ return FIELD_PREP(INTC_EXTCTL_SIRQ2_MASK, val);
+ }
+}
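+
+/*
+ * Worked example for the shared-register (S500/S700) layout, with an
+ * arbitrary register value: SIRQ1's EXTCTL byte lives in bits 15:8, so
+ * owl_field_get(0x00aa5500, 1) yields 0x55 and owl_field_prep(0x55, 1)
+ * yields 0x00005500. The S900 layout skips this packing and gives each
+ * SIRQ line a full register of its own.
+ */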
+
+static u32 owl_sirq_read_extctl(struct owl_sirq_chip_data *data, u32 index)
+{
+ u32 val;
+
+ val = readl_relaxed(data->base + data->params->reg_offset[index]);
+ if (data->params->reg_shared)
+ val = owl_field_get(val, index);
+
+ return val;
+}
+
+static void owl_sirq_write_extctl(struct owl_sirq_chip_data *data,
+ u32 extctl, u32 index)
+{
+ u32 val;
+
+ if (data->params->reg_shared) {
+ val = readl_relaxed(data->base + data->params->reg_offset[index]);
+ val &= ~owl_field_prep(0xff, index);
+ extctl = owl_field_prep(extctl, index) | val;
+ }
+
+ writel_relaxed(extctl, data->base + data->params->reg_offset[index]);
+}
+
+static void owl_sirq_clear_set_extctl(struct owl_sirq_chip_data *d,
+ u32 clear, u32 set, u32 index)
+{
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&d->lock, flags);
+ val = owl_sirq_read_extctl(d, index);
+ val &= ~clear;
+ val |= set;
+ owl_sirq_write_extctl(d, val, index);
+ raw_spin_unlock_irqrestore(&d->lock, flags);
+}
+
+static void owl_sirq_eoi(struct irq_data *data)
+{
+ struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);
+
+ /*
+	 * Software must clear the external interrupt pending bit when the
+	 * interrupt type is edge triggered, so we need per-SIRQ clearing.
+ */
+ if (!irqd_is_level_type(data))
+ owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_PENDING,
+ data->hwirq);
+
+ irq_chip_eoi_parent(data);
+}
+
+static void owl_sirq_mask(struct irq_data *data)
+{
+ struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);
+
+ owl_sirq_clear_set_extctl(chip_data, INTC_EXTCTL_EN, 0, data->hwirq);
+ irq_chip_mask_parent(data);
+}
+
+static void owl_sirq_unmask(struct irq_data *data)
+{
+ struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);
+
+ owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_EN, data->hwirq);
+ irq_chip_unmask_parent(data);
+}
+
+/*
+ * GIC does not handle falling edge or active low, hence SIRQ shall be
+ * programmed to convert falling edge to rising edge signal and active
+ * low to active high signal.
+ */
+static int owl_sirq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);
+ u32 sirq_type;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ sirq_type = INTC_EXTCTL_TYPE_LOW;
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ sirq_type = INTC_EXTCTL_TYPE_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ sirq_type = INTC_EXTCTL_TYPE_FALLING;
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ sirq_type = INTC_EXTCTL_TYPE_RISING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ owl_sirq_clear_set_extctl(chip_data, INTC_EXTCTL_TYPE_MASK, sirq_type,
+ data->hwirq);
+
+ return irq_chip_set_type_parent(data, type);
+}
+
+static struct irq_chip owl_sirq_chip = {
+ .name = "owl-sirq",
+ .irq_mask = owl_sirq_mask,
+ .irq_unmask = owl_sirq_unmask,
+ .irq_eoi = owl_sirq_eoi,
+ .irq_set_type = owl_sirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int owl_sirq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (!is_of_node(fwspec->fwnode))
+ return -EINVAL;
+
+ if (fwspec->param_count != 2 || fwspec->param[0] >= NUM_SIRQ)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+
+ return 0;
+}
+
+static int owl_sirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct owl_sirq_chip_data *chip_data = domain->host_data;
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = owl_sirq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &owl_sirq_chip,
+ chip_data);
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = GIC_SPI;
+ parent_fwspec.param[1] = chip_data->ext_irqs[hwirq];
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static const struct irq_domain_ops owl_sirq_domain_ops = {
+ .translate = owl_sirq_domain_translate,
+ .alloc = owl_sirq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init owl_sirq_init(const struct owl_sirq_params *params,
+ struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct owl_sirq_chip_data *chip_data;
+ int ret, i;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: failed to find sirq parent domain\n", node);
+ return -ENXIO;
+ }
+
+ chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+ if (!chip_data)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&chip_data->lock);
+
+ chip_data->params = params;
+
+ chip_data->base = of_iomap(node, 0);
+ if (!chip_data->base) {
+ pr_err("%pOF: failed to map sirq registers\n", node);
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ for (i = 0; i < NUM_SIRQ; i++) {
+ struct of_phandle_args irq;
+
+ ret = of_irq_parse_one(node, i, &irq);
+ if (ret) {
+ pr_err("%pOF: failed to parse interrupt %d\n", node, i);
+ goto out_unmap;
+ }
+
+ if (WARN_ON(irq.args_count != 3)) {
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ chip_data->ext_irqs[i] = irq.args[1];
+
+ /* Set 24MHz external interrupt clock freq */
+ owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_CLK_SEL, i);
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_SIRQ, node,
+ &owl_sirq_domain_ops, chip_data);
+ if (!domain) {
+ pr_err("%pOF: failed to add domain\n", node);
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ return 0;
+
+out_unmap:
+ iounmap(chip_data->base);
+out_free:
+ kfree(chip_data);
+
+ return ret;
+}
+
+static int __init owl_sirq_s500_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return owl_sirq_init(&owl_sirq_s500_params, node, parent);
+}
+
+IRQCHIP_DECLARE(owl_sirq_s500, "actions,s500-sirq", owl_sirq_s500_of_init);
+IRQCHIP_DECLARE(owl_sirq_s700, "actions,s700-sirq", owl_sirq_s500_of_init);
+
+static int __init owl_sirq_s900_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return owl_sirq_init(&owl_sirq_s900_params, node, parent);
+}
+
+IRQCHIP_DECLARE(owl_sirq_s900, "actions,s900-sirq", owl_sirq_s900_of_init);
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
new file mode 100644
index 000000000..8e76d2913
--- /dev/null
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-partition-percpu.h>
+#include <linux/irqdomain.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+struct partition_desc {
+ int nr_parts;
+ struct partition_affinity *parts;
+ struct irq_domain *domain;
+ struct irq_desc *chained_desc;
+ unsigned long *bitmap;
+ struct irq_domain_ops ops;
+};
+
+static bool partition_check_cpu(struct partition_desc *part,
+ unsigned int cpu, unsigned int hwirq)
+{
+ return cpumask_test_cpu(cpu, &part->parts[hwirq].mask);
+}
+
+static void partition_irq_mask(struct irq_data *d)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_mask)
+ chip->irq_mask(data);
+}
+
+static void partition_irq_unmask(struct irq_data *d)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_unmask)
+ chip->irq_unmask(data);
+}
+
+static int partition_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool val)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_set_irqchip_state)
+ return chip->irq_set_irqchip_state(data, which, val);
+
+ return -EINVAL;
+}
+
+static int partition_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *val)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_get_irqchip_state)
+ return chip->irq_get_irqchip_state(data, which, val);
+
+ return -EINVAL;
+}
+
+static int partition_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (chip->irq_set_type)
+ return chip->irq_set_type(data, type);
+
+ return -EINVAL;
+}
+
+static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
+}
+
+static struct irq_chip partition_irq_chip = {
+ .irq_mask = partition_irq_mask,
+ .irq_unmask = partition_irq_unmask,
+ .irq_set_type = partition_irq_set_type,
+ .irq_get_irqchip_state = partition_irq_get_irqchip_state,
+ .irq_set_irqchip_state = partition_irq_set_irqchip_state,
+ .irq_print_chip = partition_irq_print_chip,
+};
+
+static void partition_handle_irq(struct irq_desc *desc)
+{
+ struct partition_desc *part = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int cpu = smp_processor_id();
+ int hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ for_each_set_bit(hwirq, part->bitmap, part->nr_parts) {
+ if (partition_check_cpu(part, cpu, hwirq))
+ break;
+ }
+
+ if (unlikely(hwirq == part->nr_parts))
+ handle_bad_irq(desc);
+ else
+ generic_handle_domain_irq(part->domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
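+
+/*
+ * For illustration: with two allocated partitions whose affinity masks cover,
+ * say, the little and the big CPU cluster, an interrupt taken on a big CPU
+ * matches the big-cluster partition's bit in part->bitmap and is delivered as
+ * that partition's per-CPU interrupt; if no allocated partition covers the
+ * current CPU, the loop runs off the end and handle_bad_irq() is called.
+ */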
+
+static int partition_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int ret;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ struct irq_fwspec *fwspec = arg;
+ struct partition_desc *part;
+
+ BUG_ON(nr_irqs != 1);
+ ret = domain->ops->translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ part = domain->host_data;
+
+ set_bit(hwirq, part->bitmap);
+ irq_set_chained_handler_and_data(irq_desc_get_irq(part->chained_desc),
+ partition_handle_irq, part);
+ irq_set_percpu_devid_partition(virq, &part->parts[hwirq].mask);
+ irq_domain_set_info(domain, virq, hwirq, &partition_irq_chip, part,
+ handle_percpu_devid_irq, NULL, NULL);
+ irq_set_status_flags(virq, IRQ_NOAUTOEN);
+
+ return 0;
+}
+
+static void partition_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d;
+
+ BUG_ON(nr_irqs != 1);
+
+ d = irq_domain_get_irq_data(domain, virq);
+ irq_set_handler(virq, NULL);
+ irq_domain_reset_irq_data(d);
+}
+
+int partition_translate_id(struct partition_desc *desc, void *partition_id)
+{
+ struct partition_affinity *part = NULL;
+ int i;
+
+ for (i = 0; i < desc->nr_parts; i++) {
+ if (desc->parts[i].partition_id == partition_id) {
+ part = &desc->parts[i];
+ break;
+ }
+ }
+
+ if (WARN_ON(!part)) {
+ pr_err("Failed to find partition\n");
+ return -EINVAL;
+ }
+
+ return i;
+}
+
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+ struct partition_affinity *parts,
+ int nr_parts,
+ int chained_irq,
+ const struct irq_domain_ops *ops)
+{
+ struct partition_desc *desc;
+ struct irq_domain *d;
+
+ BUG_ON(!ops->select || !ops->translate);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ desc->ops = *ops;
+ desc->ops.free = partition_domain_free;
+ desc->ops.alloc = partition_domain_alloc;
+
+ d = irq_domain_create_linear(fwnode, nr_parts, &desc->ops, desc);
+ if (!d)
+ goto out;
+ desc->domain = d;
+
+ desc->bitmap = bitmap_zalloc(nr_parts, GFP_KERNEL);
+ if (WARN_ON(!desc->bitmap))
+ goto out;
+
+ desc->chained_desc = irq_to_desc(chained_irq);
+ desc->nr_parts = nr_parts;
+ desc->parts = parts;
+
+ return desc;
+out:
+ if (d)
+ irq_domain_remove(d);
+ kfree(desc);
+
+ return NULL;
+}
+
+struct irq_domain *partition_get_domain(struct partition_desc *dsc)
+{
+ if (dsc)
+ return dsc->domain;
+
+ return NULL;
+}
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
new file mode 100644
index 000000000..1d9bb28d1
--- /dev/null
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cristian Birsan <cristian.birsan@microchip.com>
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2016 Microchip Technology Inc. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irq.h>
+
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/mach-pic32/pic32.h>
+
+#define REG_INTCON 0x0000
+#define REG_INTSTAT 0x0020
+#define REG_IFS_OFFSET 0x0040
+#define REG_IEC_OFFSET 0x00C0
+#define REG_IPC_OFFSET 0x0140
+#define REG_OFF_OFFSET 0x0540
+
+#define MAJPRI_MASK 0x07
+#define SUBPRI_MASK 0x03
+#define PRIORITY_MASK 0x1F
+
+#define PIC32_INT_PRI(pri, subpri) \
+ ((((pri) & MAJPRI_MASK) << 2) | ((subpri) & SUBPRI_MASK))
+
+struct evic_chip_data {
+ u32 irq_types[NR_IRQS];
+ u32 ext_irqs[8];
+};
+
+static struct irq_domain *evic_irq_domain;
+static void __iomem *evic_base;
+
+asmlinkage void __weak plat_irq_dispatch(void)
+{
+ unsigned int hwirq;
+
+ hwirq = readl(evic_base + REG_INTSTAT) & 0xFF;
+ do_domain_IRQ(evic_irq_domain, hwirq);
+}
+
+static struct evic_chip_data *irqd_to_priv(struct irq_data *data)
+{
+ return (struct evic_chip_data *)data->domain->host_data;
+}
+
+static int pic32_set_ext_polarity(int bit, u32 type)
+{
+ /*
+ * External interrupts can be either edge rising or edge falling,
+ * but not both.
+ */
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ writel(BIT(bit), evic_base + PIC32_SET(REG_INTCON));
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ writel(BIT(bit), evic_base + PIC32_CLR(REG_INTCON));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int pic32_set_type_edge(struct irq_data *data,
+ unsigned int flow_type)
+{
+ struct evic_chip_data *priv = irqd_to_priv(data);
+ int ret;
+ int i;
+
+ if (!(flow_type & IRQ_TYPE_EDGE_BOTH))
+ return -EBADR;
+
+ /* set polarity for external interrupts only */
+ for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
+ if (priv->ext_irqs[i] == data->hwirq) {
+ ret = pic32_set_ext_polarity(i, flow_type);
+ if (ret)
+ return ret;
+ }
+ }
+
+ irqd_set_trigger_type(data, flow_type);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static void pic32_bind_evic_interrupt(int irq, int set)
+{
+ writel(set, evic_base + REG_OFF_OFFSET + irq * 4);
+}
+
+static void pic32_set_irq_priority(int irq, int priority)
+{
+ u32 reg, shift;
+
+ reg = irq / 4;
+ shift = (irq % 4) * 8;
+
+ writel(PRIORITY_MASK << shift,
+ evic_base + PIC32_CLR(REG_IPC_OFFSET + reg * 0x10));
+ writel(priority << shift,
+ evic_base + PIC32_SET(REG_IPC_OFFSET + reg * 0x10));
+}
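+
+/*
+ * Worked example: for hwirq 10, reg is 2 and shift is 16, so the 5-bit
+ * priority field ends up in bits 20:16 of IPC2. The default priority set at
+ * map time, PIC32_INT_PRI(2, 0), encodes major priority 2 and sub-priority 0
+ * as the value 0x08.
+ */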
+
+#define IRQ_REG_MASK(_hwirq, _reg, _mask) \
+ do { \
+ _reg = _hwirq / 32; \
+ _mask = 1 << (_hwirq % 32); \
+ } while (0)
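+
+/*
+ * Worked example: IRQ_REG_MASK(45, reg, mask) yields reg == 1 and
+ * mask == BIT(13), i.e. hwirq 45 is controlled through IEC1/IFS1.
+ */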
+
+static int pic32_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct evic_chip_data *priv = d->host_data;
+ struct irq_data *data;
+ int ret;
+ u32 iecclr, ifsclr;
+ u32 reg, mask;
+
+ ret = irq_map_generic_chip(d, virq, hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Piggyback on xlate function to move to an alternate chip as necessary
+ * at time of mapping instead of allowing the flow handler/chip to be
+ * changed later. This requires all interrupts to be configured through
+ * DT.
+ */
+ if (priv->irq_types[hw] & IRQ_TYPE_SENSE_MASK) {
+ data = irq_domain_get_irq_data(d, virq);
+ irqd_set_trigger_type(data, priv->irq_types[hw]);
+ irq_setup_alt_chip(data, priv->irq_types[hw]);
+ }
+
+ IRQ_REG_MASK(hw, reg, mask);
+
+ iecclr = PIC32_CLR(REG_IEC_OFFSET + reg * 0x10);
+ ifsclr = PIC32_CLR(REG_IFS_OFFSET + reg * 0x10);
+
+ /* mask and clear flag */
+ writel(mask, evic_base + iecclr);
+ writel(mask, evic_base + ifsclr);
+
+ /* default priority is required */
+ pic32_set_irq_priority(hw, PIC32_INT_PRI(2, 0));
+
+ return ret;
+}
+
+int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+ struct evic_chip_data *priv = d->host_data;
+
+ if (WARN_ON(intsize < 2))
+ return -EINVAL;
+
+ if (WARN_ON(intspec[0] >= NR_IRQS))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+
+ priv->irq_types[intspec[0]] = intspec[1] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static const struct irq_domain_ops pic32_irq_domain_ops = {
+ .map = pic32_irq_domain_map,
+ .xlate = pic32_irq_domain_xlate,
+};
+
+static void __init pic32_ext_irq_of_init(struct irq_domain *domain)
+{
+ struct device_node *node = irq_domain_get_of_node(domain);
+ struct evic_chip_data *priv = domain->host_data;
+ struct property *prop;
+ const __le32 *p;
+ u32 hwirq;
+ int i = 0;
+ const char *pname = "microchip,external-irqs";
+
+ of_property_for_each_u32(node, pname, prop, p, hwirq) {
+ if (i >= ARRAY_SIZE(priv->ext_irqs)) {
+ pr_warn("More than %d external irq, skip rest\n",
+ ARRAY_SIZE(priv->ext_irqs));
+ break;
+ }
+
+ priv->ext_irqs[i] = hwirq;
+ i++;
+ }
+}
+
+static int __init pic32_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_chip_generic *gc;
+ struct evic_chip_data *priv;
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ int nchips, ret;
+ int i;
+
+ nchips = DIV_ROUND_UP(NR_IRQS, 32);
+
+ evic_base = of_iomap(node, 0);
+ if (!evic_base)
+ return -ENOMEM;
+
+ priv = kcalloc(nchips, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+
+ evic_irq_domain = irq_domain_add_linear(node, nchips * 32,
+ &pic32_irq_domain_ops,
+ priv);
+ if (!evic_irq_domain) {
+ ret = -ENOMEM;
+ goto err_free_priv;
+ }
+
+ /*
+ * The PIC32 EVIC has a linear list of irqs and the type of each
+ * irq is determined by the hardware peripheral the EVIC is arbitrating.
+ * These irq types are defined in the datasheet as "persistent" and
+ * "non-persistent" which are mapped here to level and edge
+ * respectively. To manage the different flow handler requirements of
+ * each irq type, different chip_types are used.
+ */
+ ret = irq_alloc_domain_generic_chips(evic_irq_domain, 32, 2,
+ "evic-level", handle_level_irq,
+ clr, 0, 0);
+ if (ret)
+ goto err_domain_remove;
+
+ board_bind_eic_interrupt = &pic32_bind_evic_interrupt;
+
+ for (i = 0; i < nchips; i++) {
+ u32 ifsclr = PIC32_CLR(REG_IFS_OFFSET + (i * 0x10));
+ u32 iec = REG_IEC_OFFSET + (i * 0x10);
+
+ gc = irq_get_domain_generic_chip(evic_irq_domain, i * 32);
+
+ gc->reg_base = evic_base;
+ gc->unused = 0;
+
+ /*
+ * Level/persistent interrupts have a special requirement that
+ * the condition generating the interrupt be cleared before the
+ * interrupt flag (ifs) can be cleared. chip.irq_eoi is used to
+ * complete the interrupt with an ack.
+ */
+ gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
+ gc->chip_types[0].handler = handle_fasteoi_irq;
+ gc->chip_types[0].regs.ack = ifsclr;
+ gc->chip_types[0].regs.mask = iec;
+ gc->chip_types[0].chip.name = "evic-level";
+ gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.flags = IRQCHIP_SKIP_SET_WAKE;
+
+ /* Edge interrupts */
+ gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types[1].handler = handle_edge_irq;
+ gc->chip_types[1].regs.ack = ifsclr;
+ gc->chip_types[1].regs.mask = iec;
+ gc->chip_types[1].chip.name = "evic-edge";
+ gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[1].chip.irq_set_type = pic32_set_type_edge;
+ gc->chip_types[1].chip.flags = IRQCHIP_SKIP_SET_WAKE;
+
+ gc->private = &priv[i];
+ }
+
+ irq_set_default_host(evic_irq_domain);
+
+ /*
+ * External interrupts have software configurable edge polarity. These
+ * interrupts are defined in DT allowing polarity to be configured only
+ * for these interrupts when requested.
+ */
+ pic32_ext_irq_of_init(evic_irq_domain);
+
+ return 0;
+
+err_domain_remove:
+ irq_domain_remove(evic_irq_domain);
+
+err_free_priv:
+ kfree(priv);
+
+err_iounmap:
+ iounmap(evic_base);
+
+ return ret;
+}
+
+IRQCHIP_DECLARE(pic32_evic, "microchip,pic32mzda-evic", pic32_of_init);
diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c
new file mode 100644
index 000000000..fa8d89b02
--- /dev/null
+++ b/drivers/irqchip/irq-pruss-intc.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS INTC IRQChip driver for various TI SoCs
+ *
+ * Copyright (C) 2016-2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author(s):
+ * Andrew F. Davis <afd@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ * Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org> for Texas Instruments
+ *
+ * Copyright (C) 2019 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+/*
+ * Number of host interrupts reaching the main MPU sub-system. Note that this
+ * is not the same as the total number of host interrupts supported by the PRUSS
+ * INTC instance
+ */
+#define MAX_NUM_HOST_IRQS 8
+
+/* minimum starting host interrupt number for MPU */
+#define FIRST_PRU_HOST_INT 2
+
+/* PRU_ICSS_INTC registers */
+#define PRU_INTC_REVID 0x0000
+#define PRU_INTC_CR 0x0004
+#define PRU_INTC_GER 0x0010
+#define PRU_INTC_GNLR 0x001c
+#define PRU_INTC_SISR 0x0020
+#define PRU_INTC_SICR 0x0024
+#define PRU_INTC_EISR 0x0028
+#define PRU_INTC_EICR 0x002c
+#define PRU_INTC_HIEISR 0x0034
+#define PRU_INTC_HIDISR 0x0038
+#define PRU_INTC_GPIR 0x0080
+#define PRU_INTC_SRSR(x) (0x0200 + (x) * 4)
+#define PRU_INTC_SECR(x) (0x0280 + (x) * 4)
+#define PRU_INTC_ESR(x) (0x0300 + (x) * 4)
+#define PRU_INTC_ECR(x) (0x0380 + (x) * 4)
+#define PRU_INTC_CMR(x) (0x0400 + (x) * 4)
+#define PRU_INTC_HMR(x) (0x0800 + (x) * 4)
+#define PRU_INTC_HIPIR(x) (0x0900 + (x) * 4)
+#define PRU_INTC_SIPR(x) (0x0d00 + (x) * 4)
+#define PRU_INTC_SITR(x) (0x0d80 + (x) * 4)
+#define PRU_INTC_HINLR(x) (0x1100 + (x) * 4)
+#define PRU_INTC_HIER 0x1500
+
+/* CMR register bit-field macros */
+#define CMR_EVT_MAP_MASK 0xf
+#define CMR_EVT_MAP_BITS 8
+#define CMR_EVT_PER_REG 4
+
+/* HMR register bit-field macros */
+#define HMR_CH_MAP_MASK 0xf
+#define HMR_CH_MAP_BITS 8
+#define HMR_CH_PER_REG 4
+
+/* HIPIR register bit-fields */
+#define INTC_HIPIR_NONE_HINT 0x80000000
+
+#define MAX_PRU_SYS_EVENTS 160
+#define MAX_PRU_CHANNELS 20
+
+/**
+ * struct pruss_intc_map_record - keeps track of actual mapping state
+ * @value: The currently mapped value (channel or host)
+ * @ref_count: Keeps track of number of current users of this resource
+ */
+struct pruss_intc_map_record {
+ u8 value;
+ u8 ref_count;
+};
+
+/**
+ * struct pruss_intc_match_data - match data to handle SoC variations
+ * @num_system_events: number of input system events handled by the PRUSS INTC
+ * @num_host_events: number of host events (which is equal to number of
+ * channels) supported by the PRUSS INTC
+ */
+struct pruss_intc_match_data {
+ u8 num_system_events;
+ u8 num_host_events;
+};
+
+/**
+ * struct pruss_intc - PRUSS interrupt controller structure
+ * @event_channel: current state of system event to channel mappings
+ * @channel_host: current state of channel to host mappings
+ * @irqs: kernel irq numbers corresponding to PRUSS host interrupts
+ * @base: base virtual address of INTC register space
+ * @domain: irq domain for this interrupt controller
+ * @soc_config: cached PRUSS INTC IP configuration data
+ * @dev: PRUSS INTC device pointer
+ * @lock: mutex to serialize interrupts mapping
+ */
+struct pruss_intc {
+ struct pruss_intc_map_record event_channel[MAX_PRU_SYS_EVENTS];
+ struct pruss_intc_map_record channel_host[MAX_PRU_CHANNELS];
+ unsigned int irqs[MAX_NUM_HOST_IRQS];
+ void __iomem *base;
+ struct irq_domain *domain;
+ const struct pruss_intc_match_data *soc_config;
+ struct device *dev;
+ struct mutex lock; /* PRUSS INTC lock */
+};
+
+/**
+ * struct pruss_host_irq_data - PRUSS host irq data structure
+ * @intc: PRUSS interrupt controller pointer
+ * @host_irq: host irq number
+ */
+struct pruss_host_irq_data {
+ struct pruss_intc *intc;
+ u8 host_irq;
+};
+
+static inline u32 pruss_intc_read_reg(struct pruss_intc *intc, unsigned int reg)
+{
+ return readl_relaxed(intc->base + reg);
+}
+
+static inline void pruss_intc_write_reg(struct pruss_intc *intc,
+ unsigned int reg, u32 val)
+{
+ writel_relaxed(val, intc->base + reg);
+}
+
+static void pruss_intc_update_cmr(struct pruss_intc *intc, unsigned int evt,
+ u8 ch)
+{
+ u32 idx, offset, val;
+
+ idx = evt / CMR_EVT_PER_REG;
+ offset = (evt % CMR_EVT_PER_REG) * CMR_EVT_MAP_BITS;
+
+ val = pruss_intc_read_reg(intc, PRU_INTC_CMR(idx));
+ val &= ~(CMR_EVT_MAP_MASK << offset);
+ val |= ch << offset;
+ pruss_intc_write_reg(intc, PRU_INTC_CMR(idx), val);
+
+ dev_dbg(intc->dev, "SYSEV%u -> CH%d (CMR%d 0x%08x)\n", evt, ch,
+ idx, pruss_intc_read_reg(intc, PRU_INTC_CMR(idx)));
+}
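+
+/*
+ * Worked example: mapping system event 37 to a channel updates CMR9
+ * (37 / 4 == 9) at bit offset 8 ((37 % 4) * 8), so the channel number lands
+ * in bits 11:8 of that register, matching the "4 events per register, 8 bits
+ * per event" layout described by the CMR_EVT_* macros above.
+ */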
+
+static void pruss_intc_update_hmr(struct pruss_intc *intc, u8 ch, u8 host)
+{
+ u32 idx, offset, val;
+
+ idx = ch / HMR_CH_PER_REG;
+ offset = (ch % HMR_CH_PER_REG) * HMR_CH_MAP_BITS;
+
+ val = pruss_intc_read_reg(intc, PRU_INTC_HMR(idx));
+ val &= ~(HMR_CH_MAP_MASK << offset);
+ val |= host << offset;
+ pruss_intc_write_reg(intc, PRU_INTC_HMR(idx), val);
+
+ dev_dbg(intc->dev, "CH%d -> HOST%d (HMR%d 0x%08x)\n", ch, host, idx,
+ pruss_intc_read_reg(intc, PRU_INTC_HMR(idx)));
+}
+
+/**
+ * pruss_intc_map() - configure the PRUSS INTC
+ * @intc: PRUSS interrupt controller pointer
+ * @hwirq: the system event number
+ *
+ * Configures the PRUSS INTC with the event/channel/host mapping previously
+ * parsed and recorded by the xlate function.
+ */
+static void pruss_intc_map(struct pruss_intc *intc, unsigned long hwirq)
+{
+ struct device *dev = intc->dev;
+ u8 ch, host, reg_idx;
+ u32 val;
+
+ mutex_lock(&intc->lock);
+
+ intc->event_channel[hwirq].ref_count++;
+
+ ch = intc->event_channel[hwirq].value;
+ host = intc->channel_host[ch].value;
+
+ pruss_intc_update_cmr(intc, hwirq, ch);
+
+ reg_idx = hwirq / 32;
+ val = BIT(hwirq % 32);
+
+ /* clear and enable system event */
+ pruss_intc_write_reg(intc, PRU_INTC_ESR(reg_idx), val);
+ pruss_intc_write_reg(intc, PRU_INTC_SECR(reg_idx), val);
+
+ if (++intc->channel_host[ch].ref_count == 1) {
+ pruss_intc_update_hmr(intc, ch, host);
+
+ /* enable host interrupts */
+ pruss_intc_write_reg(intc, PRU_INTC_HIEISR, host);
+ }
+
+ dev_dbg(dev, "mapped system_event = %lu channel = %d host = %d",
+ hwirq, ch, host);
+
+ mutex_unlock(&intc->lock);
+}
+
+/**
+ * pruss_intc_unmap() - unconfigure the PRUSS INTC
+ * @intc: PRUSS interrupt controller pointer
+ * @hwirq: the system event number
+ *
+ * Undo whatever was done in pruss_intc_map() for a PRU core.
+ * Mappings are reference counted, so resources are only disabled when there
+ * are no longer any users.
+ */
+static void pruss_intc_unmap(struct pruss_intc *intc, unsigned long hwirq)
+{
+ u8 ch, host, reg_idx;
+ u32 val;
+
+ mutex_lock(&intc->lock);
+
+ ch = intc->event_channel[hwirq].value;
+ host = intc->channel_host[ch].value;
+
+ if (--intc->channel_host[ch].ref_count == 0) {
+ /* disable host interrupts */
+ pruss_intc_write_reg(intc, PRU_INTC_HIDISR, host);
+
+ /* clear the map using reset value 0 */
+ pruss_intc_update_hmr(intc, ch, 0);
+ }
+
+ intc->event_channel[hwirq].ref_count--;
+ reg_idx = hwirq / 32;
+ val = BIT(hwirq % 32);
+
+ /* disable system events */
+ pruss_intc_write_reg(intc, PRU_INTC_ECR(reg_idx), val);
+ /* clear any pending status */
+ pruss_intc_write_reg(intc, PRU_INTC_SECR(reg_idx), val);
+
+ /* clear the map using reset value 0 */
+ pruss_intc_update_cmr(intc, hwirq, 0);
+
+ dev_dbg(intc->dev, "unmapped system_event = %lu channel = %d host = %d\n",
+ hwirq, ch, host);
+
+ mutex_unlock(&intc->lock);
+}
+
+static void pruss_intc_init(struct pruss_intc *intc)
+{
+ const struct pruss_intc_match_data *soc_config = intc->soc_config;
+ int num_chnl_map_regs, num_host_intr_regs, num_event_type_regs, i;
+
+ num_chnl_map_regs = DIV_ROUND_UP(soc_config->num_system_events,
+ CMR_EVT_PER_REG);
+ num_host_intr_regs = DIV_ROUND_UP(soc_config->num_host_events,
+ HMR_CH_PER_REG);
+ num_event_type_regs = DIV_ROUND_UP(soc_config->num_system_events, 32);
+
+ /*
+ * configure polarity (SIPR register) to active high and
+ * type (SITR register) to level interrupt for all system events
+ */
+ for (i = 0; i < num_event_type_regs; i++) {
+ pruss_intc_write_reg(intc, PRU_INTC_SIPR(i), 0xffffffff);
+ pruss_intc_write_reg(intc, PRU_INTC_SITR(i), 0);
+ }
+
+ /* clear all interrupt channel map registers, 4 events per register */
+ for (i = 0; i < num_chnl_map_regs; i++)
+ pruss_intc_write_reg(intc, PRU_INTC_CMR(i), 0);
+
+ /* clear all host interrupt map registers, 4 channels per register */
+ for (i = 0; i < num_host_intr_regs; i++)
+ pruss_intc_write_reg(intc, PRU_INTC_HMR(i), 0);
+
+ /* global interrupt enable */
+ pruss_intc_write_reg(intc, PRU_INTC_GER, 1);
+}
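+
+/*
+ * Worked example: for the plain PRUSS variant (64 system events, 10 host
+ * events) this initialises 16 CMR registers, 3 HMR registers and 2
+ * SIPR/SITR register pairs before setting the global enable bit.
+ */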
+
+static void pruss_intc_irq_ack(struct irq_data *data)
+{
+ struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
+ unsigned int hwirq = data->hwirq;
+
+ pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq);
+}
+
+static void pruss_intc_irq_mask(struct irq_data *data)
+{
+ struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
+ unsigned int hwirq = data->hwirq;
+
+ pruss_intc_write_reg(intc, PRU_INTC_EICR, hwirq);
+}
+
+static void pruss_intc_irq_unmask(struct irq_data *data)
+{
+ struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
+ unsigned int hwirq = data->hwirq;
+
+ pruss_intc_write_reg(intc, PRU_INTC_EISR, hwirq);
+}
+
+static int pruss_intc_irq_reqres(struct irq_data *data)
+{
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void pruss_intc_irq_relres(struct irq_data *data)
+{
+ module_put(THIS_MODULE);
+}
+
+static int pruss_intc_irq_get_irqchip_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
+ u32 reg, mask, srsr;
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ reg = PRU_INTC_SRSR(data->hwirq / 32);
+ mask = BIT(data->hwirq % 32);
+
+ srsr = pruss_intc_read_reg(intc, reg);
+
+ *state = !!(srsr & mask);
+
+ return 0;
+}
+
+static int pruss_intc_irq_set_irqchip_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ if (state)
+ pruss_intc_write_reg(intc, PRU_INTC_SISR, data->hwirq);
+ else
+ pruss_intc_write_reg(intc, PRU_INTC_SICR, data->hwirq);
+
+ return 0;
+}
+
+static struct irq_chip pruss_irqchip = {
+ .name = "pruss-intc",
+ .irq_ack = pruss_intc_irq_ack,
+ .irq_mask = pruss_intc_irq_mask,
+ .irq_unmask = pruss_intc_irq_unmask,
+ .irq_request_resources = pruss_intc_irq_reqres,
+ .irq_release_resources = pruss_intc_irq_relres,
+ .irq_get_irqchip_state = pruss_intc_irq_get_irqchip_state,
+ .irq_set_irqchip_state = pruss_intc_irq_set_irqchip_state,
+};
+
+static int pruss_intc_validate_mapping(struct pruss_intc *intc, int event,
+ int channel, int host)
+{
+ struct device *dev = intc->dev;
+ int ret = 0;
+
+ mutex_lock(&intc->lock);
+
+ /* check if sysevent already assigned */
+ if (intc->event_channel[event].ref_count > 0 &&
+ intc->event_channel[event].value != channel) {
+ dev_err(dev, "event %d (req. ch %d) already assigned to channel %d\n",
+ event, channel, intc->event_channel[event].value);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* check if channel already assigned */
+ if (intc->channel_host[channel].ref_count > 0 &&
+ intc->channel_host[channel].value != host) {
+ dev_err(dev, "channel %d (req. host %d) already assigned to host %d\n",
+ channel, host, intc->channel_host[channel].value);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ intc->event_channel[event].value = channel;
+ intc->channel_host[channel].value = host;
+
+unlock:
+ mutex_unlock(&intc->lock);
+ return ret;
+}
+
+static int
+pruss_intc_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ struct pruss_intc *intc = d->host_data;
+ struct device *dev = intc->dev;
+ int ret, sys_event, channel, host;
+
+ if (intsize < 3)
+ return -EINVAL;
+
+ sys_event = intspec[0];
+ if (sys_event < 0 || sys_event >= intc->soc_config->num_system_events) {
+ dev_err(dev, "%d is not valid event number\n", sys_event);
+ return -EINVAL;
+ }
+
+ channel = intspec[1];
+ if (channel < 0 || channel >= intc->soc_config->num_host_events) {
+ dev_err(dev, "%d is not valid channel number", channel);
+ return -EINVAL;
+ }
+
+ host = intspec[2];
+ if (host < 0 || host >= intc->soc_config->num_host_events) {
+ dev_err(dev, "%d is not valid host irq number\n", host);
+ return -EINVAL;
+ }
+
+ /* check if requested sys_event was already mapped, if so validate it */
+ ret = pruss_intc_validate_mapping(intc, sys_event, channel, host);
+ if (ret)
+ return ret;
+
+ *out_hwirq = sys_event;
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int pruss_intc_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct pruss_intc *intc = d->host_data;
+
+ pruss_intc_map(intc, hw);
+
+ irq_set_chip_data(virq, intc);
+ irq_set_chip_and_handler(virq, &pruss_irqchip, handle_level_irq);
+
+ return 0;
+}
+
+static void pruss_intc_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
+{
+ struct pruss_intc *intc = d->host_data;
+ unsigned long hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
+
+ irq_set_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_data(virq, NULL);
+ pruss_intc_unmap(intc, hwirq);
+}
+
+static const struct irq_domain_ops pruss_intc_irq_domain_ops = {
+ .xlate = pruss_intc_irq_domain_xlate,
+ .map = pruss_intc_irq_domain_map,
+ .unmap = pruss_intc_irq_domain_unmap,
+};
+
+static void pruss_intc_irq_handler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct pruss_host_irq_data *host_irq_data = irq_get_handler_data(irq);
+ struct pruss_intc *intc = host_irq_data->intc;
+ u8 host_irq = host_irq_data->host_irq + FIRST_PRU_HOST_INT;
+
+ chained_irq_enter(chip, desc);
+
+ while (true) {
+ u32 hipir;
+ int hwirq, err;
+
+ /* get highest priority pending PRUSS system event */
+ hipir = pruss_intc_read_reg(intc, PRU_INTC_HIPIR(host_irq));
+ if (hipir & INTC_HIPIR_NONE_HINT)
+ break;
+
+ hwirq = hipir & GENMASK(9, 0);
+ err = generic_handle_domain_irq(intc->domain, hwirq);
+
+ /*
+ * NOTE: manually ACK any system events that do not have a
+ * handler mapped yet
+ */
+ if (WARN_ON_ONCE(err))
+ pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static const char * const irq_names[MAX_NUM_HOST_IRQS] = {
+ "host_intr0", "host_intr1", "host_intr2", "host_intr3",
+ "host_intr4", "host_intr5", "host_intr6", "host_intr7",
+};
+
+static int pruss_intc_probe(struct platform_device *pdev)
+{
+ const struct pruss_intc_match_data *data;
+ struct device *dev = &pdev->dev;
+ struct pruss_intc *intc;
+ struct pruss_host_irq_data *host_data;
+ int i, irq, ret;
+ u8 max_system_events, irqs_reserved = 0;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ max_system_events = data->num_system_events;
+
+ intc = devm_kzalloc(dev, sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
+
+ intc->soc_config = data;
+ intc->dev = dev;
+ platform_set_drvdata(pdev, intc);
+
+ intc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(intc->base))
+ return PTR_ERR(intc->base);
+
+ ret = of_property_read_u8(dev->of_node, "ti,irqs-reserved",
+ &irqs_reserved);
+
+ /*
+	 * The ti,irqs-reserved property is used only on some SoCs, so not
+	 * having this property is still valid
+ */
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
+ pruss_intc_init(intc);
+
+ mutex_init(&intc->lock);
+
+ intc->domain = irq_domain_add_linear(dev->of_node, max_system_events,
+ &pruss_intc_irq_domain_ops, intc);
+ if (!intc->domain)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_NUM_HOST_IRQS; i++) {
+ if (irqs_reserved & BIT(i))
+ continue;
+
+ irq = platform_get_irq_byname(pdev, irq_names[i]);
+ if (irq <= 0) {
+ ret = (irq == 0) ? -EINVAL : irq;
+ goto fail_irq;
+ }
+
+ intc->irqs[i] = irq;
+
+ host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
+ if (!host_data) {
+ ret = -ENOMEM;
+ goto fail_irq;
+ }
+
+ host_data->intc = intc;
+ host_data->host_irq = i;
+
+ irq_set_handler_data(irq, host_data);
+ irq_set_chained_handler(irq, pruss_intc_irq_handler);
+ }
+
+ return 0;
+
+fail_irq:
+ while (--i >= 0) {
+ if (intc->irqs[i])
+ irq_set_chained_handler_and_data(intc->irqs[i], NULL,
+ NULL);
+ }
+
+ irq_domain_remove(intc->domain);
+
+ return ret;
+}
+
+static int pruss_intc_remove(struct platform_device *pdev)
+{
+ struct pruss_intc *intc = platform_get_drvdata(pdev);
+ u8 max_system_events = intc->soc_config->num_system_events;
+ unsigned int hwirq;
+ int i;
+
+ for (i = 0; i < MAX_NUM_HOST_IRQS; i++) {
+ if (intc->irqs[i])
+ irq_set_chained_handler_and_data(intc->irqs[i], NULL,
+ NULL);
+ }
+
+ for (hwirq = 0; hwirq < max_system_events; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(intc->domain, hwirq));
+
+ irq_domain_remove(intc->domain);
+
+ return 0;
+}
+
+static const struct pruss_intc_match_data pruss_intc_data = {
+ .num_system_events = 64,
+ .num_host_events = 10,
+};
+
+static const struct pruss_intc_match_data icssg_intc_data = {
+ .num_system_events = 160,
+ .num_host_events = 20,
+};
+
+static const struct of_device_id pruss_intc_of_match[] = {
+ {
+ .compatible = "ti,pruss-intc",
+ .data = &pruss_intc_data,
+ },
+ {
+ .compatible = "ti,icssg-intc",
+ .data = &icssg_intc_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, pruss_intc_of_match);
+
+static struct platform_driver pruss_intc_driver = {
+ .driver = {
+ .name = "pruss-intc",
+ .of_match_table = pruss_intc_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pruss_intc_probe,
+ .remove = pruss_intc_remove,
+};
+module_platform_driver(pruss_intc_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_AUTHOR("Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org>");
+MODULE_DESCRIPTION("TI PRU-ICSS INTC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
new file mode 100644
index 000000000..d30614661
--- /dev/null
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Linaro Limited
+ * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/irq.h>
+#include <linux/spinlock.h>
+
+/*
+ * This is the driver for Qualcomm MPM (MSM Power Manager) interrupt controller,
+ * which is commonly found on Qualcomm SoCs built on the RPM architecture.
+ * Sitting in always-on domain, MPM monitors the wakeup interrupts when SoC is
+ * asleep, and wakes up the AP when one of those interrupts occurs. This driver
+ * doesn't directly access physical MPM registers though. Instead, the access
+ * is bridged via a piece of internal memory (SRAM) that is accessible to both
+ * AP and RPM. This piece of memory is called 'vMPM' in the driver.
+ *
+ * When SoC is awake, the vMPM is owned by AP and the register setup by this
+ * driver all happens on vMPM. When AP is about to get power collapsed, the
+ * driver sends a mailbox notification to RPM, which will take over the vMPM
+ * ownership and dump vMPM into physical MPM registers. On wakeup, AP is woken
+ * up by an MPM pin/interrupt, and RPM will copy STATUS registers into vMPM.
+ * Then the AP owns vMPM again.
+ *
+ * vMPM register map:
+ *
+ * 31 0
+ * +--------------------------------+
+ * | TIMER0 | 0x00
+ * +--------------------------------+
+ * | TIMER1 | 0x04
+ * +--------------------------------+
+ * | ENABLE0 | 0x08
+ * +--------------------------------+
+ * | ... | ...
+ * +--------------------------------+
+ * | ENABLEn |
+ * +--------------------------------+
+ * | FALLING_EDGE0 |
+ * +--------------------------------+
+ * | ... |
+ * +--------------------------------+
+ * | STATUSn |
+ * +--------------------------------+
+ *
+ * n = DIV_ROUND_UP(pin_cnt, 32)
+ *
+ */
+
+#define MPM_REG_ENABLE 0
+#define MPM_REG_FALLING_EDGE 1
+#define MPM_REG_RISING_EDGE 2
+#define MPM_REG_POLARITY 3
+#define MPM_REG_STATUS 4
+
+/* MPM pin map to GIC hwirq */
+struct mpm_gic_map {
+ int pin;
+ irq_hw_number_t hwirq;
+};
+
+struct qcom_mpm_priv {
+ void __iomem *base;
+ raw_spinlock_t lock;
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+ struct mpm_gic_map *maps;
+ unsigned int map_cnt;
+ unsigned int reg_stride;
+ struct irq_domain *domain;
+ struct generic_pm_domain genpd;
+};
+
+static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg,
+ unsigned int index)
+{
+ unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;
+
+ return readl_relaxed(priv->base + offset);
+}
+
+static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg,
+ unsigned int index, u32 val)
+{
+ unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;
+
+ writel_relaxed(val, priv->base + offset);
+
+ /* Ensure the write is completed */
+ wmb();
+}
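+
+/*
+ * Worked example: assuming a SoC with qcom,mpm-pin-count = 96, reg_stride is
+ * 3, so MPM_REG_ENABLE index 0 maps to offset (0 * 3 + 0 + 2) * 4 = 0x08 and
+ * MPM_REG_FALLING_EDGE index 0 to (1 * 3 + 0 + 2) * 4 = 0x14; the "+ 2" in
+ * the offset skips the two TIMER words shown in the vMPM map above.
+ */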
+
+static void qcom_mpm_enable_irq(struct irq_data *d, bool en)
+{
+ struct qcom_mpm_priv *priv = d->chip_data;
+ int pin = d->hwirq;
+ unsigned int index = pin / 32;
+ unsigned int shift = pin % 32;
+ unsigned long flags, val;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ val = qcom_mpm_read(priv, MPM_REG_ENABLE, index);
+ __assign_bit(shift, &val, en);
+ qcom_mpm_write(priv, MPM_REG_ENABLE, index, val);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void qcom_mpm_mask(struct irq_data *d)
+{
+ qcom_mpm_enable_irq(d, false);
+
+ if (d->parent_data)
+ irq_chip_mask_parent(d);
+}
+
+static void qcom_mpm_unmask(struct irq_data *d)
+{
+ qcom_mpm_enable_irq(d, true);
+
+ if (d->parent_data)
+ irq_chip_unmask_parent(d);
+}
+
+static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg,
+ unsigned int index, unsigned int shift)
+{
+ unsigned long flags, val;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ val = qcom_mpm_read(priv, reg, index);
+ __assign_bit(shift, &val, set);
+ qcom_mpm_write(priv, reg, index, val);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int qcom_mpm_set_type(struct irq_data *d, unsigned int type)
+{
+ struct qcom_mpm_priv *priv = d->chip_data;
+ int pin = d->hwirq;
+ unsigned int index = pin / 32;
+ unsigned int shift = pin % 32;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift);
+ else
+ mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift);
+ else
+ mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift);
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift);
+ else
+ mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift);
+
+ if (!d->parent_data)
+ return 0;
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ return irq_chip_set_type_parent(d, type);
+}
+
+static struct irq_chip qcom_mpm_chip = {
+ .name = "mpm",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = qcom_mpm_mask,
+ .irq_unmask = qcom_mpm_unmask,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = qcom_mpm_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
+};
+
+static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin)
+{
+ struct mpm_gic_map *maps = priv->maps;
+ int i;
+
+ for (i = 0; i < priv->map_cnt; i++) {
+ if (maps[i].pin == pin)
+ return &maps[i];
+ }
+
+ return NULL;
+}
+
+static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct qcom_mpm_priv *priv = domain->host_data;
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct mpm_gic_map *map;
+ irq_hw_number_t pin;
+ unsigned int type;
+ int ret;
+
+ ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type);
+ if (ret)
+ return ret;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
+ &qcom_mpm_chip, priv);
+ if (ret)
+ return ret;
+
+ map = get_mpm_gic_map(priv, pin);
+ if (map == NULL)
+ return irq_domain_disconnect_hierarchy(domain->parent, virq);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = map->hwirq;
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static const struct irq_domain_ops qcom_mpm_ops = {
+ .alloc = qcom_mpm_alloc,
+ .free = irq_domain_free_irqs_common,
+ .translate = irq_domain_translate_twocell,
+};
+
+/* Triggered by RPM when system resumes from deep sleep */
+static irqreturn_t qcom_mpm_handler(int irq, void *dev_id)
+{
+ struct qcom_mpm_priv *priv = dev_id;
+ unsigned long enable, pending;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < priv->reg_stride; i++) {
+ raw_spin_lock_irqsave(&priv->lock, flags);
+ enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i);
+ pending = qcom_mpm_read(priv, MPM_REG_STATUS, i);
+ pending &= enable;
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ for_each_set_bit(j, &pending, 32) {
+ unsigned int pin = 32 * i + j;
+ struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin);
+ struct irq_data *d = &desc->irq_data;
+
+ if (!irqd_is_level_type(d))
+ irq_set_irqchip_state(d->irq,
+ IRQCHIP_STATE_PENDING, true);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static int mpm_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv,
+ genpd);
+ int i, ret;
+
+ for (i = 0; i < priv->reg_stride; i++)
+ qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
+
+ /* Notify RPM to write vMPM into HW */
+ ret = mbox_send_message(priv->mbox_chan, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++)
+ if (maps[i].hwirq == hwirq)
+ return true;
+
+ return false;
+}
+
+static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
+{
+ struct platform_device *pdev = of_find_device_by_node(np);
+ struct device *dev = &pdev->dev;
+ struct irq_domain *parent_domain;
+ struct generic_pm_domain *genpd;
+ struct qcom_mpm_priv *priv;
+ unsigned int pin_cnt;
+ int i, irq;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt);
+ if (ret) {
+ dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret);
+ return ret;
+ }
+
+ priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32);
+
+ ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map");
+ if (ret < 0) {
+ dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret);
+ return ret;
+ }
+
+ if (ret % 2) {
+ dev_err(dev, "invalid qcom,mpm-pin-map\n");
+ return -EINVAL;
+ }
+
+ priv->map_cnt = ret / 2;
+ priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps),
+ GFP_KERNEL);
+ if (!priv->maps)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->map_cnt; i++) {
+ u32 pin, hwirq;
+
+ of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin);
+ of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq);
+
+ if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) {
+ dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n",
+ pin, hwirq);
+ continue;
+ }
+
+ priv->maps[i].pin = pin;
+ priv->maps[i].hwirq = hwirq;
+ }
+
+ raw_spin_lock_init(&priv->lock);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ for (i = 0; i < priv->reg_stride; i++) {
+ qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
+ qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0);
+ qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0);
+ qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0);
+ qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ genpd = &priv->genpd;
+ genpd->flags = GENPD_FLAG_IRQ_SAFE;
+ genpd->power_off = mpm_pd_power_off;
+
+ genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev));
+ if (!genpd->name)
+ return -ENOMEM;
+
+ ret = pm_genpd_init(genpd, NULL, false);
+ if (ret) {
+ dev_err(dev, "failed to init genpd: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_simple(np, genpd);
+ if (ret) {
+ dev_err(dev, "failed to add genpd provider: %d\n", ret);
+ goto remove_genpd;
+ }
+
+ priv->mbox_client.dev = dev;
+ priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0);
+ if (IS_ERR(priv->mbox_chan)) {
+ ret = PTR_ERR(priv->mbox_chan);
+ dev_err(dev, "failed to acquire IPC channel: %d\n", ret);
+ return ret;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ dev_err(dev, "failed to find MPM parent domain\n");
+ ret = -ENXIO;
+ goto free_mbox;
+ }
+
+ priv->domain = irq_domain_create_hierarchy(parent_domain,
+ IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt,
+ of_node_to_fwnode(np), &qcom_mpm_ops, priv);
+ if (!priv->domain) {
+ dev_err(dev, "failed to create MPM domain\n");
+ ret = -ENOMEM;
+ goto free_mbox;
+ }
+
+ irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP);
+
+ ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND,
+ "qcom_mpm", priv);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto remove_domain;
+ }
+
+ return 0;
+
+remove_domain:
+ irq_domain_remove(priv->domain);
+free_mbox:
+ mbox_free_channel(priv->mbox_chan);
+remove_genpd:
+ pm_genpd_remove(genpd);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm)
+IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init)
+IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm)
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-rda-intc.c b/drivers/irqchip/irq-rda-intc.c
new file mode 100644
index 000000000..9f0144a73
--- /dev/null
+++ b/drivers/irqchip/irq-rda-intc.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDA8810PL SoC irqchip driver
+ *
+ * Copyright RDA Microelectronics Company Limited
+ * Copyright (c) 2017 Andreas Färber
+ * Copyright (c) 2018 Manivannan Sadhasivam
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+
+#include <asm/exception.h>
+
+#define RDA_INTC_FINALSTATUS 0x00
+#define RDA_INTC_MASK_SET 0x08
+#define RDA_INTC_MASK_CLR 0x0c
+
+#define RDA_IRQ_MASK_ALL 0xFFFFFFFF
+
+#define RDA_NR_IRQS 32
+
+static void __iomem *rda_intc_base;
+static struct irq_domain *rda_irq_domain;
+
+static void rda_intc_mask_irq(struct irq_data *d)
+{
+ writel_relaxed(BIT(d->hwirq), rda_intc_base + RDA_INTC_MASK_CLR);
+}
+
+static void rda_intc_unmask_irq(struct irq_data *d)
+{
+ writel_relaxed(BIT(d->hwirq), rda_intc_base + RDA_INTC_MASK_SET);
+}
+
+static int rda_intc_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ /* Hardware supports only level triggered interrupts */
+ if ((flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) == flow_type)
+ return 0;
+
+ return -EINVAL;
+}
+
+static void __exception_irq_entry rda_handle_irq(struct pt_regs *regs)
+{
+ u32 stat = readl_relaxed(rda_intc_base + RDA_INTC_FINALSTATUS);
+ u32 hwirq;
+
+ while (stat) {
+ hwirq = __fls(stat);
+ generic_handle_domain_irq(rda_irq_domain, hwirq);
+ stat &= ~BIT(hwirq);
+ }
+}
+
+static struct irq_chip rda_irq_chip = {
+ .name = "rda-intc",
+ .irq_mask = rda_intc_mask_irq,
+ .irq_unmask = rda_intc_unmask_irq,
+ .irq_set_type = rda_intc_set_type,
+};
+
+static int rda_irq_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &rda_irq_chip, handle_level_irq);
+ irq_set_chip_data(virq, d->host_data);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops rda_irq_domain_ops = {
+ .map = rda_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init rda8810_intc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ rda_intc_base = of_io_request_and_map(node, 0, "rda-intc");
+ if (IS_ERR(rda_intc_base))
+ return PTR_ERR(rda_intc_base);
+
+ /* Mask all interrupt sources */
+ writel_relaxed(RDA_IRQ_MASK_ALL, rda_intc_base + RDA_INTC_MASK_CLR);
+
+ rda_irq_domain = irq_domain_create_linear(&node->fwnode, RDA_NR_IRQS,
+ &rda_irq_domain_ops,
+ rda_intc_base);
+ if (!rda_irq_domain) {
+ iounmap(rda_intc_base);
+ return -ENOMEM;
+ }
+
+ set_handle_irq(rda_handle_irq);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(rda_intc, "rda,8810pl-intc", rda8810_intc_init);
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
new file mode 100644
index 000000000..2a349082a
--- /dev/null
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Birger Koblitz <mail@birger-koblitz.de>
+ * Copyright (C) 2020 Bert Vermeulen <bert@biot.com>
+ * Copyright (C) 2020 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/irqchip/chained_irq.h>
+
+/* Global Interrupt Mask Register */
+#define RTL_ICTL_GIMR 0x00
+/* Global Interrupt Status Register */
+#define RTL_ICTL_GISR 0x04
+/* Interrupt Routing Registers */
+#define RTL_ICTL_IRR0 0x08
+#define RTL_ICTL_IRR1 0x0c
+#define RTL_ICTL_IRR2 0x10
+#define RTL_ICTL_IRR3 0x14
+
+#define RTL_ICTL_NUM_INPUTS 32
+
+#define REG(x) (realtek_ictl_base + x)
+
+static DEFINE_RAW_SPINLOCK(irq_lock);
+static void __iomem *realtek_ictl_base;
+
+/*
+ * IRR0-IRR3 store 4 bits per interrupt, but Realtek uses inverted numbering,
+ * placing IRQ 31 in the first four bits. A routing value of '0' means the
+ * interrupt is left disconnected. Routing values {1..15} connect to output
+ * lines {0..14}.
+ */
+#define IRR_OFFSET(idx) (4 * (3 - (idx * 4) / 32))
+#define IRR_SHIFT(idx) ((idx * 4) % 32)
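+/*
+ * As a worked example, SoC IRQ 19 gives IRR_OFFSET(19) = 4 and
+ * IRR_SHIFT(19) = 12, i.e. its routing nibble sits in IRR1 bits 15:12.
+ */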
+
+static void write_irr(void __iomem *irr0, int idx, u32 value)
+{
+ unsigned int offset = IRR_OFFSET(idx);
+ unsigned int shift = IRR_SHIFT(idx);
+ u32 irr;
+
+ irr = readl(irr0 + offset) & ~(0xf << shift);
+ irr |= (value & 0xf) << shift;
+ writel(irr, irr0 + offset);
+}
+
+static void realtek_ictl_unmask_irq(struct irq_data *i)
+{
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+
+ value = readl(REG(RTL_ICTL_GIMR));
+ value |= BIT(i->hwirq);
+ writel(value, REG(RTL_ICTL_GIMR));
+
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+}
+
+static void realtek_ictl_mask_irq(struct irq_data *i)
+{
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+
+ value = readl(REG(RTL_ICTL_GIMR));
+ value &= ~BIT(i->hwirq);
+ writel(value, REG(RTL_ICTL_GIMR));
+
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+}
+
+static struct irq_chip realtek_ictl_irq = {
+ .name = "realtek-rtl-intc",
+ .irq_mask = realtek_ictl_mask_irq,
+ .irq_unmask = realtek_ictl_unmask_irq,
+};
+
+static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ unsigned long flags;
+
+ irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq);
+
+ raw_spin_lock_irqsave(&irq_lock, flags);
+ write_irr(REG(RTL_ICTL_IRR0), hw, 1);
+ raw_spin_unlock_irqrestore(&irq_lock, flags);
+
+ return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+ .map = intc_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void realtek_irq_dispatch(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_domain *domain;
+ unsigned long pending;
+ unsigned int soc_int;
+
+ chained_irq_enter(chip, desc);
+ pending = readl(REG(RTL_ICTL_GIMR)) & readl(REG(RTL_ICTL_GISR));
+
+ if (unlikely(!pending)) {
+ spurious_interrupt();
+ goto out;
+ }
+
+ domain = irq_desc_get_handler_data(desc);
+ for_each_set_bit(soc_int, &pending, 32)
+ generic_handle_domain_irq(domain, soc_int);
+
+out:
+ chained_irq_exit(chip, desc);
+}
+
+static int __init realtek_rtl_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct of_phandle_args oirq;
+ struct irq_domain *domain;
+ unsigned int soc_irq;
+ int parent_irq;
+
+ realtek_ictl_base = of_iomap(node, 0);
+ if (!realtek_ictl_base)
+ return -ENXIO;
+
+ /* Disable all cascaded interrupts and clear routing */
+ writel(0, REG(RTL_ICTL_GIMR));
+ for (soc_irq = 0; soc_irq < RTL_ICTL_NUM_INPUTS; soc_irq++)
+ write_irr(REG(RTL_ICTL_IRR0), soc_irq, 0);
+
+ if (WARN_ON(!of_irq_count(node))) {
+ /*
+ * If DT contains no parent interrupts, assume MIPS CPU IRQ 2
+ * (HW0) is connected to the first output. This is the case for
+ * all known hardware anyway. "interrupt-map" is deprecated, so
+ * don't bother trying to parse that.
+ */
+ oirq.np = of_find_compatible_node(NULL, NULL, "mti,cpu-interrupt-controller");
+ oirq.args_count = 1;
+ oirq.args[0] = 2;
+
+ parent_irq = irq_create_of_mapping(&oirq);
+
+ of_node_put(oirq.np);
+ } else {
+ parent_irq = of_irq_get(node, 0);
+ }
+
+ if (parent_irq < 0)
+ return parent_irq;
+ else if (!parent_irq)
+ return -ENODEV;
+
+ domain = irq_domain_add_linear(node, RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL);
+ if (!domain)
+ return -ENOMEM;
+
+ irq_set_chained_handler_and_data(parent_irq, realtek_irq_dispatch, domain);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(realtek_rtl_intc, "realtek,rtl-intc", realtek_rtl_of_init);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
new file mode 100644
index 000000000..e83756aca
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas INTC External IRQ Pin Driver
+ *
+ * Copyright (C) 2013 Magnus Damm
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+
+#define INTC_IRQPIN_MAX 8 /* maximum 8 interrupts per driver instance */
+
+#define INTC_IRQPIN_REG_SENSE 0 /* ICRn */
+#define INTC_IRQPIN_REG_PRIO 1 /* INTPRInn */
+#define INTC_IRQPIN_REG_SOURCE 2 /* INTREQnn */
+#define INTC_IRQPIN_REG_MASK 3 /* INTMSKnn */
+#define INTC_IRQPIN_REG_CLEAR 4 /* INTMSKCLRnn */
+#define INTC_IRQPIN_REG_NR_MANDATORY 5
+#define INTC_IRQPIN_REG_IRLM 5 /* ICR0 with IRLM bit (optional) */
+#define INTC_IRQPIN_REG_NR 6
+
+/* INTC external IRQ PIN hardware register access:
+ *
+ * SENSE is read-write 32-bit with 2-bits or 4-bits per IRQ (*)
+ * PRIO is read-write 32-bit with 4-bits per IRQ (**)
+ * SOURCE is read-only 32-bit or 8-bit with 1-bit per IRQ (***)
+ * MASK is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
+ * CLEAR is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
+ *
+ * (*) May be accessed by more than one driver instance - lock needed
+ * (**) Read-modify-write access by one driver instance - lock needed
+ * (***) Accessed by one driver instance only - no locking needed
+ */
+
+struct intc_irqpin_iomem {
+ void __iomem *iomem;
+ unsigned long (*read)(void __iomem *iomem);
+ void (*write)(void __iomem *iomem, unsigned long data);
+ int width;
+};
+
+struct intc_irqpin_irq {
+ int hw_irq;
+ int requested_irq;
+ int domain_irq;
+ struct intc_irqpin_priv *p;
+};
+
+struct intc_irqpin_priv {
+ struct intc_irqpin_iomem iomem[INTC_IRQPIN_REG_NR];
+ struct intc_irqpin_irq irq[INTC_IRQPIN_MAX];
+ unsigned int sense_bitfield_width;
+ struct platform_device *pdev;
+ struct irq_chip irq_chip;
+ struct irq_domain *irq_domain;
+ atomic_t wakeup_path;
+ unsigned shared_irqs:1;
+ u8 shared_irq_mask;
+};
+
+struct intc_irqpin_config {
+ int irlm_bit; /* -1 if non-existent */
+};
+
+static unsigned long intc_irqpin_read32(void __iomem *iomem)
+{
+ return ioread32(iomem);
+}
+
+static unsigned long intc_irqpin_read8(void __iomem *iomem)
+{
+ return ioread8(iomem);
+}
+
+static void intc_irqpin_write32(void __iomem *iomem, unsigned long data)
+{
+ iowrite32(data, iomem);
+}
+
+static void intc_irqpin_write8(void __iomem *iomem, unsigned long data)
+{
+ iowrite8(data, iomem);
+}
+
+static inline unsigned long intc_irqpin_read(struct intc_irqpin_priv *p,
+ int reg)
+{
+ struct intc_irqpin_iomem *i = &p->iomem[reg];
+
+ return i->read(i->iomem);
+}
+
+static inline void intc_irqpin_write(struct intc_irqpin_priv *p,
+ int reg, unsigned long data)
+{
+ struct intc_irqpin_iomem *i = &p->iomem[reg];
+
+ i->write(i->iomem, data);
+}
+
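+/*
+ * Bit numbering in the SOURCE/MASK/CLEAR registers is MSB first: with a
+ * 32-bit register hw_irq 0 corresponds to BIT(31), with an 8-bit register
+ * to BIT(7).
+ */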
+static inline unsigned long intc_irqpin_hwirq_mask(struct intc_irqpin_priv *p,
+ int reg, int hw_irq)
+{
+ return BIT((p->iomem[reg].width - 1) - hw_irq);
+}
+
+static inline void intc_irqpin_irq_write_hwirq(struct intc_irqpin_priv *p,
+ int reg, int hw_irq)
+{
+ intc_irqpin_write(p, reg, intc_irqpin_hwirq_mask(p, reg, hw_irq));
+}
+
+static DEFINE_RAW_SPINLOCK(intc_irqpin_lock); /* only used by slow path */
+
+static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
+ int reg, int shift,
+ int width, int value)
+{
+ unsigned long flags;
+ unsigned long tmp;
+
+ raw_spin_lock_irqsave(&intc_irqpin_lock, flags);
+
+ tmp = intc_irqpin_read(p, reg);
+ tmp &= ~(((1 << width) - 1) << shift);
+ tmp |= value << shift;
+ intc_irqpin_write(p, reg, tmp);
+
+ raw_spin_unlock_irqrestore(&intc_irqpin_lock, flags);
+}
+
+static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
+ int irq, int do_mask)
+{
+ /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
+ int bitfield_width = 4;
+ int shift = 32 - (irq + 1) * bitfield_width;
+
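+ /* e.g. irq 0 maps to bits 31:28 and irq 7 to bits 3:0 (MSB-first packing) */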
+ intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
+ shift, bitfield_width,
+ do_mask ? 0 : (1 << bitfield_width) - 1);
+}
+
+static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
+{
+ /* The SENSE register is assumed to be 32-bit. */
+ int bitfield_width = p->sense_bitfield_width;
+ int shift = 32 - (irq + 1) * bitfield_width;
+
+ dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
+
+ if (value >= (1 << bitfield_width))
+ return -EINVAL;
+
+ intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_SENSE, shift,
+ bitfield_width, value);
+ return 0;
+}
+
+static void intc_irqpin_dbg(struct intc_irqpin_irq *i, char *str)
+{
+ dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
+ str, i->requested_irq, i->hw_irq, i->domain_irq);
+}
+
+static void intc_irqpin_irq_enable(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ intc_irqpin_dbg(&p->irq[hw_irq], "enable");
+ intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
+}
+
+static void intc_irqpin_irq_disable(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ intc_irqpin_dbg(&p->irq[hw_irq], "disable");
+ intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
+}
+
+static void intc_irqpin_shared_irq_enable(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ intc_irqpin_dbg(&p->irq[hw_irq], "shared enable");
+ intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
+
+ p->shared_irq_mask &= ~BIT(hw_irq);
+}
+
+static void intc_irqpin_shared_irq_disable(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ intc_irqpin_dbg(&p->irq[hw_irq], "shared disable");
+ intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
+
+ p->shared_irq_mask |= BIT(hw_irq);
+}
+
+static void intc_irqpin_irq_enable_force(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int irq = p->irq[irqd_to_hwirq(d)].requested_irq;
+
+ intc_irqpin_irq_enable(d);
+
+ /* Enable the interrupt through the parent interrupt controller as well.
+ * This assumes a non-shared interrupt with a 1:1 mapping and is needed
+ * to work around broken IRQs on some SoCs such as sh73a0.
+ */
+ irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
+}
+
+static void intc_irqpin_irq_disable_force(struct irq_data *d)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int irq = p->irq[irqd_to_hwirq(d)].requested_irq;
+
+ /* Disable the interrupt through the parent interrupt controller as well.
+ * This assumes a non-shared interrupt with a 1:1 mapping and is needed
+ * to work around broken IRQs on some SoCs such as sh73a0.
+ */
+ irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq));
+ intc_irqpin_irq_disable(d);
+}
+
+#define INTC_IRQ_SENSE_VALID 0x10
+#define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID)
+
+static unsigned char intc_irqpin_sense[IRQ_TYPE_SENSE_MASK + 1] = {
+ [IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x00),
+ [IRQ_TYPE_EDGE_RISING] = INTC_IRQ_SENSE(0x01),
+ [IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x02),
+ [IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x03),
+ [IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x04),
+};
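+/*
+ * For example, IRQ_TYPE_EDGE_BOTH looks up 0x14 here;
+ * intc_irqpin_irq_set_type() strips INTC_IRQ_SENSE_VALID and programs the
+ * remaining 0x04 into the ICRn sense field.
+ */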
+
+static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned char value = intc_irqpin_sense[type & IRQ_TYPE_SENSE_MASK];
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+
+ if (!(value & INTC_IRQ_SENSE_VALID))
+ return -EINVAL;
+
+ return intc_irqpin_set_sense(p, irqd_to_hwirq(d),
+ value ^ INTC_IRQ_SENSE_VALID);
+}
+
+static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
+ if (on)
+ atomic_inc(&p->wakeup_path);
+ else
+ atomic_dec(&p->wakeup_path);
+
+ return 0;
+}
+
+static irqreturn_t intc_irqpin_irq_handler(int irq, void *dev_id)
+{
+ struct intc_irqpin_irq *i = dev_id;
+ struct intc_irqpin_priv *p = i->p;
+ unsigned long bit;
+
+ intc_irqpin_dbg(i, "demux1");
+ bit = intc_irqpin_hwirq_mask(p, INTC_IRQPIN_REG_SOURCE, i->hw_irq);
+
+ if (intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE) & bit) {
+ intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, ~bit);
+ intc_irqpin_dbg(i, "demux2");
+ generic_handle_irq(i->domain_irq);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
+{
+ struct intc_irqpin_priv *p = dev_id;
+ unsigned int reg_source = intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE);
+ irqreturn_t status = IRQ_NONE;
+ int k;
+
+ for (k = 0; k < 8; k++) {
+ if (reg_source & BIT(7 - k)) {
+ if (BIT(k) & p->shared_irq_mask)
+ continue;
+
+ status |= intc_irqpin_irq_handler(irq, &p->irq[k]);
+ }
+ }
+
+ return status;
+}
+
+/*
+ * This lock class tells lockdep that INTC External IRQ Pin irqs are in a
+ * different category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key intc_irqpin_irq_lock_class;
+
+/* And this is for the request mutex */
+static struct lock_class_key intc_irqpin_irq_request_class;
+
+static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct intc_irqpin_priv *p = h->host_data;
+
+ p->irq[hw].domain_irq = virq;
+ p->irq[hw].hw_irq = hw;
+
+ intc_irqpin_dbg(&p->irq[hw], "map");
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
+ &intc_irqpin_irq_request_class);
+ irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops intc_irqpin_irq_domain_ops = {
+ .map = intc_irqpin_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = {
+ .irlm_bit = 23, /* ICR0.IRLM0 */
+};
+
+static const struct intc_irqpin_config intc_irqpin_rmobile = {
+ .irlm_bit = -1,
+};
+
+static const struct of_device_id intc_irqpin_dt_ids[] = {
+ { .compatible = "renesas,intc-irqpin", },
+ { .compatible = "renesas,intc-irqpin-r8a7778",
+ .data = &intc_irqpin_irlm_r8a777x },
+ { .compatible = "renesas,intc-irqpin-r8a7779",
+ .data = &intc_irqpin_irlm_r8a777x },
+ { .compatible = "renesas,intc-irqpin-r8a7740",
+ .data = &intc_irqpin_rmobile },
+ { .compatible = "renesas,intc-irqpin-sh73a0",
+ .data = &intc_irqpin_rmobile },
+ {},
+};
+MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
+
+static int intc_irqpin_probe(struct platform_device *pdev)
+{
+ const struct intc_irqpin_config *config;
+ struct device *dev = &pdev->dev;
+ struct intc_irqpin_priv *p;
+ struct intc_irqpin_iomem *i;
+ struct resource *io[INTC_IRQPIN_REG_NR];
+ struct irq_chip *irq_chip;
+ void (*enable_fn)(struct irq_data *d);
+ void (*disable_fn)(struct irq_data *d);
+ const char *name = dev_name(dev);
+ bool control_parent;
+ unsigned int nirqs;
+ int ref_irq;
+ int ret;
+ int k;
+
+ p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ /* deal with driver instance configuration */
+ of_property_read_u32(dev->of_node, "sense-bitfield-width",
+ &p->sense_bitfield_width);
+ control_parent = of_property_read_bool(dev->of_node, "control-parent");
+ if (!p->sense_bitfield_width)
+ p->sense_bitfield_width = 4; /* default to 4 bits */
+
+ p->pdev = pdev;
+ platform_set_drvdata(pdev, p);
+
+ config = of_device_get_match_data(dev);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ /* get hold of register banks */
+ memset(io, 0, sizeof(io));
+ for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
+ io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k);
+ if (!io[k] && k < INTC_IRQPIN_REG_NR_MANDATORY) {
+ dev_err(dev, "not enough IOMEM resources\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+ }
+
+ /* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
+ for (k = 0; k < INTC_IRQPIN_MAX; k++) {
+ ret = platform_get_irq_optional(pdev, k);
+ if (ret == -ENXIO)
+ break;
+ if (ret < 0)
+ goto err0;
+
+ p->irq[k].p = p;
+ p->irq[k].requested_irq = ret;
+ }
+
+ nirqs = k;
+ if (nirqs < 1) {
+ dev_err(dev, "not enough IRQ resources\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ /* ioremap IOMEM and set up read/write callbacks */
+ for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
+ i = &p->iomem[k];
+
+ /* handle optional registers */
+ if (!io[k])
+ continue;
+
+ switch (resource_size(io[k])) {
+ case 1:
+ i->width = 8;
+ i->read = intc_irqpin_read8;
+ i->write = intc_irqpin_write8;
+ break;
+ case 4:
+ i->width = 32;
+ i->read = intc_irqpin_read32;
+ i->write = intc_irqpin_write32;
+ break;
+ default:
+ dev_err(dev, "IOMEM size mismatch\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ i->iomem = devm_ioremap(dev, io[k]->start,
+ resource_size(io[k]));
+ if (!i->iomem) {
+ dev_err(dev, "failed to remap IOMEM\n");
+ ret = -ENXIO;
+ goto err0;
+ }
+ }
+
+ /* configure "individual IRQ mode" where needed */
+ if (config && config->irlm_bit >= 0) {
+ if (io[INTC_IRQPIN_REG_IRLM])
+ intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
+ config->irlm_bit, 1, 1);
+ else
+ dev_warn(dev, "unable to select IRLM mode\n");
+ }
+
+ /* mask all interrupts using priority */
+ for (k = 0; k < nirqs; k++)
+ intc_irqpin_mask_unmask_prio(p, k, 1);
+
+ /* clear all pending interrupts */
+ intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);
+
+ /* scan for shared interrupt lines */
+ ref_irq = p->irq[0].requested_irq;
+ p->shared_irqs = 1;
+ for (k = 1; k < nirqs; k++) {
+ if (ref_irq != p->irq[k].requested_irq) {
+ p->shared_irqs = 0;
+ break;
+ }
+ }
+
+ /* use more severe masking method if requested */
+ if (control_parent) {
+ enable_fn = intc_irqpin_irq_enable_force;
+ disable_fn = intc_irqpin_irq_disable_force;
+ } else if (!p->shared_irqs) {
+ enable_fn = intc_irqpin_irq_enable;
+ disable_fn = intc_irqpin_irq_disable;
+ } else {
+ enable_fn = intc_irqpin_shared_irq_enable;
+ disable_fn = intc_irqpin_shared_irq_disable;
+ }
+
+ irq_chip = &p->irq_chip;
+ irq_chip->name = "intc-irqpin";
+ irq_chip->irq_mask = disable_fn;
+ irq_chip->irq_unmask = enable_fn;
+ irq_chip->irq_set_type = intc_irqpin_irq_set_type;
+ irq_chip->irq_set_wake = intc_irqpin_irq_set_wake;
+ irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0,
+ &intc_irqpin_irq_domain_ops, p);
+ if (!p->irq_domain) {
+ ret = -ENXIO;
+ dev_err(dev, "cannot initialize irq domain\n");
+ goto err0;
+ }
+
+ irq_domain_set_pm_device(p->irq_domain, dev);
+
+ if (p->shared_irqs) {
+ /* request one shared interrupt */
+ if (devm_request_irq(dev, p->irq[0].requested_irq,
+ intc_irqpin_shared_irq_handler,
+ IRQF_SHARED, name, p)) {
+ dev_err(dev, "failed to request low IRQ\n");
+ ret = -ENOENT;
+ goto err1;
+ }
+ } else {
+ /* request interrupts one by one */
+ for (k = 0; k < nirqs; k++) {
+ if (devm_request_irq(dev, p->irq[k].requested_irq,
+ intc_irqpin_irq_handler, 0, name,
+ &p->irq[k])) {
+ dev_err(dev, "failed to request low IRQ\n");
+ ret = -ENOENT;
+ goto err1;
+ }
+ }
+ }
+
+ /* unmask all interrupts on prio level */
+ for (k = 0; k < nirqs; k++)
+ intc_irqpin_mask_unmask_prio(p, k, 0);
+
+ dev_info(dev, "driving %d irqs\n", nirqs);
+
+ return 0;
+
+err1:
+ irq_domain_remove(p->irq_domain);
+err0:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int intc_irqpin_remove(struct platform_device *pdev)
+{
+ struct intc_irqpin_priv *p = platform_get_drvdata(pdev);
+
+ irq_domain_remove(p->irq_domain);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused intc_irqpin_suspend(struct device *dev)
+{
+ struct intc_irqpin_priv *p = dev_get_drvdata(dev);
+
+ if (atomic_read(&p->wakeup_path))
+ device_set_wakeup_path(dev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(intc_irqpin_pm_ops, intc_irqpin_suspend, NULL);
+
+static struct platform_driver intc_irqpin_device_driver = {
+ .probe = intc_irqpin_probe,
+ .remove = intc_irqpin_remove,
+ .driver = {
+ .name = "renesas_intc_irqpin",
+ .of_match_table = intc_irqpin_dt_ids,
+ .pm = &intc_irqpin_pm_ops,
+ }
+};
+
+static int __init intc_irqpin_init(void)
+{
+ return platform_driver_register(&intc_irqpin_device_driver);
+}
+postcore_initcall(intc_irqpin_init);
+
+static void __exit intc_irqpin_exit(void)
+{
+ platform_driver_unregister(&intc_irqpin_device_driver);
+}
+module_exit(intc_irqpin_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas INTC External IRQ Pin Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
new file mode 100644
index 000000000..1ee5e9941
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas IRQC Driver
+ *
+ * Copyright (C) 2013 Magnus Damm
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#define IRQC_IRQ_MAX 32 /* maximum 32 interrupts per driver instance */
+
+#define IRQC_REQ_STS 0x00 /* Interrupt Request Status Register */
+#define IRQC_EN_STS 0x04 /* Interrupt Enable Status Register */
+#define IRQC_EN_SET 0x08 /* Interrupt Enable Set Register */
+#define IRQC_INT_CPU_BASE(n) (0x000 + ((n) * 0x10))
+ /* SYS-CPU vs. RT-CPU */
+#define DETECT_STATUS 0x100 /* IRQn Detect Status Register */
+#define MONITOR 0x104 /* IRQn Signal Level Monitor Register */
+#define HLVL_STS 0x108 /* IRQn High Level Detect Status Register */
+#define LLVL_STS 0x10c /* IRQn Low Level Detect Status Register */
+#define S_R_EDGE_STS 0x110 /* IRQn Sync Rising Edge Detect Status Reg. */
+#define S_F_EDGE_STS 0x114 /* IRQn Sync Falling Edge Detect Status Reg. */
+#define A_R_EDGE_STS 0x118 /* IRQn Async Rising Edge Detect Status Reg. */
+#define A_F_EDGE_STS 0x11c /* IRQn Async Falling Edge Detect Status Reg. */
+#define CHTEN_STS 0x120 /* Chattering Reduction Status Register */
+#define IRQC_CONFIG(n) (0x180 + ((n) * 0x04))
+ /* IRQn Configuration Register */
+
+struct irqc_irq {
+ int hw_irq;
+ int requested_irq;
+ struct irqc_priv *p;
+};
+
+struct irqc_priv {
+ void __iomem *iomem;
+ void __iomem *cpu_int_base;
+ struct irqc_irq irq[IRQC_IRQ_MAX];
+ unsigned int number_of_irqs;
+ struct device *dev;
+ struct irq_chip_generic *gc;
+ struct irq_domain *irq_domain;
+ atomic_t wakeup_path;
+};
+
+static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
+{
+ return data->domain->host_data;
+}
+
+static void irqc_dbg(struct irqc_irq *i, char *str)
+{
+ dev_dbg(i->p->dev, "%s (%d:%d)\n", str, i->requested_irq, i->hw_irq);
+}
+
+static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
+ [IRQ_TYPE_LEVEL_LOW] = 0x01,
+ [IRQ_TYPE_LEVEL_HIGH] = 0x02,
+ [IRQ_TYPE_EDGE_FALLING] = 0x04, /* Synchronous */
+ [IRQ_TYPE_EDGE_RISING] = 0x08, /* Synchronous */
+ [IRQ_TYPE_EDGE_BOTH] = 0x0c, /* Synchronous */
+};
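+/*
+ * For example, IRQ_TYPE_EDGE_BOTH maps to 0x0c, which irqc_irq_set_type()
+ * writes into the low six bits of IRQC_CONFIG(hw_irq).
+ */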
+
+static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct irqc_priv *p = irq_data_to_priv(d);
+ int hw_irq = irqd_to_hwirq(d);
+ unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
+ u32 tmp;
+
+ irqc_dbg(&p->irq[hw_irq], "sense");
+
+ if (!value)
+ return -EINVAL;
+
+ tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
+ tmp &= ~0x3f;
+ tmp |= value;
+ iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
+ return 0;
+}
+
+static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct irqc_priv *p = irq_data_to_priv(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
+ if (on)
+ atomic_inc(&p->wakeup_path);
+ else
+ atomic_dec(&p->wakeup_path);
+
+ return 0;
+}
+
+static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
+{
+ struct irqc_irq *i = dev_id;
+ struct irqc_priv *p = i->p;
+ u32 bit = BIT(i->hw_irq);
+
+ irqc_dbg(i, "demux1");
+
+ if (ioread32(p->iomem + DETECT_STATUS) & bit) {
+ iowrite32(bit, p->iomem + DETECT_STATUS);
+ irqc_dbg(i, "demux2");
+ generic_handle_domain_irq(p->irq_domain, i->hw_irq);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static int irqc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const char *name = dev_name(dev);
+ struct irqc_priv *p;
+ int ret;
+ int k;
+
+ p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->dev = dev;
+ platform_set_drvdata(pdev, p);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ /* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
+ for (k = 0; k < IRQC_IRQ_MAX; k++) {
+ ret = platform_get_irq_optional(pdev, k);
+ if (ret == -ENXIO)
+ break;
+ if (ret < 0)
+ goto err_runtime_pm_disable;
+
+ p->irq[k].p = p;
+ p->irq[k].hw_irq = k;
+ p->irq[k].requested_irq = ret;
+ }
+
+ p->number_of_irqs = k;
+ if (p->number_of_irqs < 1) {
+ dev_err(dev, "not enough IRQ resources\n");
+ ret = -EINVAL;
+ goto err_runtime_pm_disable;
+ }
+
+ /* ioremap the register block */
+ p->iomem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(p->iomem)) {
+ ret = PTR_ERR(p->iomem);
+ goto err_runtime_pm_disable;
+ }
+
+ p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
+
+ p->irq_domain = irq_domain_add_linear(dev->of_node, p->number_of_irqs,
+ &irq_generic_chip_ops, p);
+ if (!p->irq_domain) {
+ ret = -ENXIO;
+ dev_err(dev, "cannot initialize irq domain\n");
+ goto err_runtime_pm_disable;
+ }
+
+ ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
+ 1, "irqc", handle_level_irq,
+ 0, 0, IRQ_GC_INIT_NESTED_LOCK);
+ if (ret) {
+ dev_err(dev, "cannot allocate generic chip\n");
+ goto err_remove_domain;
+ }
+
+ p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
+ p->gc->reg_base = p->cpu_int_base;
+ p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
+ p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
+ p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
+ p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake;
+ p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ irq_domain_set_pm_device(p->irq_domain, dev);
+
+ /* request interrupts one by one */
+ for (k = 0; k < p->number_of_irqs; k++) {
+ if (devm_request_irq(dev, p->irq[k].requested_irq,
+ irqc_irq_handler, 0, name, &p->irq[k])) {
+ dev_err(dev, "failed to request IRQ\n");
+ ret = -ENOENT;
+ goto err_remove_domain;
+ }
+ }
+
+ dev_info(dev, "driving %d irqs\n", p->number_of_irqs);
+
+ return 0;
+
+err_remove_domain:
+ irq_domain_remove(p->irq_domain);
+err_runtime_pm_disable:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int irqc_remove(struct platform_device *pdev)
+{
+ struct irqc_priv *p = platform_get_drvdata(pdev);
+
+ irq_domain_remove(p->irq_domain);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused irqc_suspend(struct device *dev)
+{
+ struct irqc_priv *p = dev_get_drvdata(dev);
+
+ if (atomic_read(&p->wakeup_path))
+ device_set_wakeup_path(dev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL);
+
+static const struct of_device_id irqc_dt_ids[] = {
+ { .compatible = "renesas,irqc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, irqc_dt_ids);
+
+static struct platform_driver irqc_device_driver = {
+ .probe = irqc_probe,
+ .remove = irqc_remove,
+ .driver = {
+ .name = "renesas_irqc",
+ .of_match_table = irqc_dt_ids,
+ .pm = &irqc_pm_ops,
+ }
+};
+
+static int __init irqc_init(void)
+{
+ return platform_driver_register(&irqc_device_driver);
+}
+postcore_initcall(irqc_init);
+
+static void __exit irqc_exit(void)
+{
+ platform_driver_unregister(&irqc_device_driver);
+}
+module_exit(irqc_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas IRQC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
new file mode 100644
index 000000000..72c06e883
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/A1 IRQC Driver
+ *
+ * Copyright (C) 2019 Glider bvba
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define IRQC_NUM_IRQ 8
+
+#define ICR0 0 /* Interrupt Control Register 0 */
+
+#define ICR0_NMIL BIT(15) /* NMI Input Level (0=low, 1=high) */
+#define ICR0_NMIE BIT(8) /* Edge Select (0=falling, 1=rising) */
+#define ICR0_NMIF BIT(1) /* NMI Interrupt Request */
+
+#define ICR1 2 /* Interrupt Control Register 1 */
+
+#define ICR1_IRQS(n, sense) ((sense) << ((n) * 2)) /* IRQ Sense Select */
+#define ICR1_IRQS_LEVEL_LOW 0
+#define ICR1_IRQS_EDGE_FALLING 1
+#define ICR1_IRQS_EDGE_RISING 2
+#define ICR1_IRQS_EDGE_BOTH 3
+#define ICR1_IRQS_MASK(n) ICR1_IRQS((n), 3)
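+/*
+ * For example, ICR1_IRQS(3, ICR1_IRQS_EDGE_FALLING) evaluates to 1 << 6,
+ * i.e. the sense field for IRQ3 occupies ICR1 bits 7:6.
+ */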
+
+#define IRQRR 4 /* IRQ Interrupt Request Register */
+
+
+struct rza1_irqc_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_chip chip;
+ struct irq_domain *irq_domain;
+ struct of_phandle_args map[IRQC_NUM_IRQ];
+};
+
+static struct rza1_irqc_priv *irq_data_to_priv(struct irq_data *data)
+{
+ return data->domain->host_data;
+}
+
+static void rza1_irqc_eoi(struct irq_data *d)
+{
+ struct rza1_irqc_priv *priv = irq_data_to_priv(d);
+ u16 bit = BIT(irqd_to_hwirq(d));
+ u16 tmp;
+
+ tmp = readw_relaxed(priv->base + IRQRR);
+ if (tmp & bit)
+ writew_relaxed(GENMASK(IRQC_NUM_IRQ - 1, 0) & ~bit,
+ priv->base + IRQRR);
+
+ irq_chip_eoi_parent(d);
+}
+
+static int rza1_irqc_set_type(struct irq_data *d, unsigned int type)
+{
+ struct rza1_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = irqd_to_hwirq(d);
+ u16 sense, tmp;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_LOW:
+ sense = ICR1_IRQS_LEVEL_LOW;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = ICR1_IRQS_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = ICR1_IRQS_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ sense = ICR1_IRQS_EDGE_BOTH;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tmp = readw_relaxed(priv->base + ICR1);
+ tmp &= ~ICR1_IRQS_MASK(hw_irq);
+ tmp |= ICR1_IRQS(hw_irq, sense);
+ writew_relaxed(tmp, priv->base + ICR1);
+ return 0;
+}
+
+static int rza1_irqc_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct rza1_irqc_priv *priv = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+ unsigned int hwirq = fwspec->param[0];
+ struct irq_fwspec spec;
+ unsigned int i;
+ int ret;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &priv->chip,
+ priv);
+ if (ret)
+ return ret;
+
+ spec.fwnode = &priv->dev->of_node->fwnode;
+ spec.param_count = priv->map[hwirq].args_count;
+ for (i = 0; i < spec.param_count; i++)
+ spec.param[i] = priv->map[hwirq].args[i];
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &spec);
+}
+
+static int rza1_irqc_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec, unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (fwspec->param_count != 2 || fwspec->param[0] >= IRQC_NUM_IRQ)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+}
+
+static const struct irq_domain_ops rza1_irqc_domain_ops = {
+ .alloc = rza1_irqc_alloc,
+ .translate = rza1_irqc_translate,
+};
+
+static int rza1_irqc_parse_map(struct rza1_irqc_priv *priv,
+ struct device_node *gic_node)
+{
+ unsigned int imaplen, i, j, ret;
+ struct device *dev = priv->dev;
+ struct device_node *ipar;
+ const __be32 *imap;
+ u32 intsize;
+
+ imap = of_get_property(dev->of_node, "interrupt-map", &imaplen);
+ if (!imap)
+ return -EINVAL;
+
+ for (i = 0; i < IRQC_NUM_IRQ; i++) {
+ if (imaplen < 3)
+ return -EINVAL;
+
+ /* Check interrupt number, ignore sense */
+ if (be32_to_cpup(imap) != i)
+ return -EINVAL;
+
+ ipar = of_find_node_by_phandle(be32_to_cpup(imap + 2));
+ if (ipar != gic_node) {
+ of_node_put(ipar);
+ return -EINVAL;
+ }
+
+ imap += 3;
+ imaplen -= 3;
+
+ ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize);
+ of_node_put(ipar);
+ if (ret)
+ return ret;
+
+ if (imaplen < intsize)
+ return -EINVAL;
+
+ priv->map[i].args_count = intsize;
+ for (j = 0; j < intsize; j++)
+ priv->map[i].args[j] = be32_to_cpup(imap++);
+
+ imaplen -= intsize;
+ }
+
+ return 0;
+}
+
+static int rza1_irqc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct irq_domain *parent = NULL;
+ struct device_node *gic_node;
+ struct rza1_irqc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+ priv->dev = dev;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ gic_node = of_irq_find_parent(np);
+ if (gic_node)
+ parent = irq_find_host(gic_node);
+
+ if (!parent) {
+ dev_err(dev, "cannot find parent domain\n");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ ret = rza1_irqc_parse_map(priv, gic_node);
+ if (ret) {
+ dev_err(dev, "cannot parse %s: %d\n", "interrupt-map", ret);
+ goto out_put_node;
+ }
+
+ priv->chip.name = "rza1-irqc";
+ priv->chip.irq_mask = irq_chip_mask_parent;
+ priv->chip.irq_unmask = irq_chip_unmask_parent;
+ priv->chip.irq_eoi = rza1_irqc_eoi;
+ priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy;
+ priv->chip.irq_set_type = rza1_irqc_set_type;
+ priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
+
+ priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
+ np, &rza1_irqc_domain_ops,
+ priv);
+ if (!priv->irq_domain) {
+ dev_err(dev, "cannot initialize irq domain\n");
+ ret = -ENOMEM;
+ }
+
+out_put_node:
+ of_node_put(gic_node);
+ return ret;
+}
+
+static int rza1_irqc_remove(struct platform_device *pdev)
+{
+ struct rza1_irqc_priv *priv = platform_get_drvdata(pdev);
+
+ irq_domain_remove(priv->irq_domain);
+ return 0;
+}
+
+static const struct of_device_id rza1_irqc_dt_ids[] = {
+ { .compatible = "renesas,rza1-irqc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rza1_irqc_dt_ids);
+
+static struct platform_driver rza1_irqc_device_driver = {
+ .probe = rza1_irqc_probe,
+ .remove = rza1_irqc_remove,
+ .driver = {
+ .name = "renesas_rza1_irqc",
+ .of_match_table = rza1_irqc_dt_ids,
+ }
+};
+
+static int __init rza1_irqc_init(void)
+{
+ return platform_driver_register(&rza1_irqc_device_driver);
+}
+postcore_initcall(rza1_irqc_init);
+
+static void __exit rza1_irqc_exit(void)
+{
+ platform_driver_unregister(&rza1_irqc_device_driver);
+}
+module_exit(rza1_irqc_exit);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("Renesas RZ/A1 IRQC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
new file mode 100644
index 000000000..10c3e85c9
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L IRQC Driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation.
+ *
+ * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define IRQC_IRQ_START 1
+#define IRQC_IRQ_COUNT 8
+#define IRQC_TINT_START (IRQC_IRQ_START + IRQC_IRQ_COUNT)
+#define IRQC_TINT_COUNT 32
+#define IRQC_NUM_IRQ (IRQC_TINT_START + IRQC_TINT_COUNT)
+
+#define ISCR 0x10
+#define IITSR 0x14
+#define TSCR 0x20
+#define TITSR0 0x24
+#define TITSR1 0x28
+#define TITSR0_MAX_INT 16
+#define TITSEL_WIDTH 0x2
+#define TSSR(n) (0x30 + ((n) * 4))
+#define TIEN BIT(7)
+#define TSSEL_SHIFT(n) (8 * (n))
+#define TSSEL_MASK GENMASK(7, 0)
+#define IRQ_MASK 0x3
+
+#define TSSR_OFFSET(n) ((n) % 4)
+#define TSSR_INDEX(n) ((n) / 4)
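+/*
+ * For example, the TINT at hwirq 19 (offset 10 from IRQC_TINT_START) uses
+ * TSSR_INDEX(10) = 2 and TSSR_OFFSET(10) = 2, i.e. byte lane 2 (bits 23:16)
+ * of TSSR(2) at offset 0x38.
+ */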
+
+#define TITSR_TITSEL_EDGE_RISING 0
+#define TITSR_TITSEL_EDGE_FALLING 1
+#define TITSR_TITSEL_LEVEL_HIGH 2
+#define TITSR_TITSEL_LEVEL_LOW 3
+
+#define IITSR_IITSEL(n, sense) ((sense) << ((n) * 2))
+#define IITSR_IITSEL_LEVEL_LOW 0
+#define IITSR_IITSEL_EDGE_FALLING 1
+#define IITSR_IITSEL_EDGE_RISING 2
+#define IITSR_IITSEL_EDGE_BOTH 3
+#define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3)
+
+#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
+#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
+
+struct rzg2l_irqc_priv {
+ void __iomem *base;
+ struct irq_fwspec fwspec[IRQC_NUM_IRQ];
+ raw_spinlock_t lock;
+};
+
+static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
+{
+ return data->domain->host_data;
+}
+
+static void rzg2l_irq_eoi(struct irq_data *d)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ u32 bit = BIT(hw_irq);
+ u32 reg;
+
+ reg = readl_relaxed(priv->base + ISCR);
+ if (reg & bit)
+ writel_relaxed(reg & ~bit, priv->base + ISCR);
+}
+
+static void rzg2l_tint_eoi(struct irq_data *d)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ u32 bit = BIT(hw_irq);
+ u32 reg;
+
+ reg = readl_relaxed(priv->base + TSCR);
+ if (reg & bit)
+ writel_relaxed(reg & ~bit, priv->base + TSCR);
+}
+
+static void rzg2l_irqc_eoi(struct irq_data *d)
+{
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = irqd_to_hwirq(d);
+
+ raw_spin_lock(&priv->lock);
+ if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
+ rzg2l_irq_eoi(d);
+ else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
+ rzg2l_tint_eoi(d);
+ raw_spin_unlock(&priv->lock);
+ irq_chip_eoi_parent(d);
+}
+
+static void rzg2l_irqc_irq_disable(struct irq_data *d)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d);
+
+ if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ u32 offset = hw_irq - IRQC_TINT_START;
+ u32 tssr_offset = TSSR_OFFSET(offset);
+ u8 tssr_index = TSSR_INDEX(offset);
+ u32 reg;
+
+ raw_spin_lock(&priv->lock);
+ reg = readl_relaxed(priv->base + TSSR(tssr_index));
+ reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ raw_spin_unlock(&priv->lock);
+ }
+ irq_chip_disable_parent(d);
+}
+
+static void rzg2l_irqc_irq_enable(struct irq_data *d)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d);
+
+ if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned long tint = (uintptr_t)d->chip_data;
+ u32 offset = hw_irq - IRQC_TINT_START;
+ u32 tssr_offset = TSSR_OFFSET(offset);
+ u8 tssr_index = TSSR_INDEX(offset);
+ u32 reg;
+
+ raw_spin_lock(&priv->lock);
+ reg = readl_relaxed(priv->base + TSSR(tssr_index));
+ reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset);
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ raw_spin_unlock(&priv->lock);
+ }
+ irq_chip_enable_parent(d);
+}
+
+static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ u16 sense, tmp;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_LOW:
+ sense = IITSR_IITSEL_LEVEL_LOW;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = IITSR_IITSEL_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = IITSR_IITSEL_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ sense = IITSR_IITSEL_EDGE_BOTH;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ raw_spin_lock(&priv->lock);
+ tmp = readl_relaxed(priv->base + IITSR);
+ tmp &= ~IITSR_IITSEL_MASK(hw_irq);
+ tmp |= IITSR_IITSEL(hw_irq, sense);
+ writel_relaxed(tmp, priv->base + IITSR);
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
+{
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 titseln = hwirq - IRQC_TINT_START;
+ u32 offset;
+ u8 sense;
+ u32 reg;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ sense = TITSR_TITSEL_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = TITSR_TITSEL_EDGE_FALLING;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ offset = TITSR0;
+ if (titseln >= TITSR0_MAX_INT) {
+ titseln -= TITSR0_MAX_INT;
+ offset = TITSR1;
+ }
+
+ raw_spin_lock(&priv->lock);
+ reg = readl_relaxed(priv->base + offset);
+ reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
+ reg |= sense << (titseln * TITSEL_WIDTH);
+ writel_relaxed(reg, priv->base + offset);
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int hw_irq = irqd_to_hwirq(d);
+ int ret = -EINVAL;
+
+ if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
+ ret = rzg2l_irq_set_type(d, type);
+ else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
+ ret = rzg2l_tint_set_edge(d, type);
+ if (ret)
+ return ret;
+
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static const struct irq_chip irqc_chip = {
+ .name = "rzg2l-irqc",
+ .irq_eoi = rzg2l_irqc_eoi,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_disable = rzg2l_irqc_irq_disable,
+ .irq_enable = rzg2l_irqc_irq_enable,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = rzg2l_irqc_set_type,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct rzg2l_irqc_priv *priv = domain->host_data;
+ unsigned long tint = 0;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ /*
+ * For TINT interrupts, i.e. where the pinctrl driver is a child of the
+ * irqc domain, the hwirq and the TINT are both encoded in
+ * fwspec->param[0]: the hwirq (which ranges from 9 to 40 for TINTs) is
+ * embedded in bits 0-15 and the TINT number in bits 16-31. The TINT
+ * from the pinctrl driver needs to be programmed into the IRQC
+ * registers to enable a given GPIO pin as an interrupt.
+ */
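+ /* For example, param[0] = (3 << 16) | 9 encodes GPIOINT/TINT 3 on hwirq 9,
+ * the first TINT slot.
+ */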
+ if (hwirq > IRQC_IRQ_COUNT) {
+ tint = TINT_EXTRACT_GPIOINT(hwirq);
+ hwirq = TINT_EXTRACT_HWIRQ(hwirq);
+
+ if (hwirq < IRQC_TINT_START)
+ return -EINVAL;
+ }
+
+ if (hwirq > (IRQC_NUM_IRQ - 1))
+ return -EINVAL;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &irqc_chip,
+ (void *)(uintptr_t)tint);
+ if (ret)
+ return ret;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
+}
+
+static const struct irq_domain_ops rzg2l_irqc_domain_ops = {
+ .alloc = rzg2l_irqc_alloc,
+ .free = irq_domain_free_irqs_common,
+ .translate = irq_domain_translate_twocell,
+};
+
+static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
+ struct device_node *np)
+{
+ struct of_phandle_args map;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < IRQC_NUM_IRQ; i++) {
+ ret = of_irq_parse_one(np, i, &map);
+ if (ret)
+ return ret;
+ of_phandle_args_to_fwspec(np, map.args, map.args_count,
+ &priv->fwspec[i]);
+ }
+
+ return 0;
+}
+
+static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *irq_domain, *parent_domain;
+ struct platform_device *pdev;
+ struct reset_control *resetn;
+ struct rzg2l_irqc_priv *priv;
+ int ret;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return -ENODEV;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ dev_err(&pdev->dev, "cannot find parent domain\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ ret = rzg2l_irqc_parse_interrupts(priv, node);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
+ return ret;
+ }
+
+ resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(resetn))
+ return PTR_ERR(resetn);
+
+ ret = reset_control_deassert(resetn);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
+ goto pm_disable;
+ }
+
+ raw_spin_lock_init(&priv->lock);
+
+ irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
+ node, &rzg2l_irqc_domain_ops,
+ priv);
+ if (!irq_domain) {
+ dev_err(&pdev->dev, "failed to add irq domain\n");
+ ret = -ENOMEM;
+ goto pm_put;
+ }
+
+ return 0;
+
+pm_put:
+ pm_runtime_put(&pdev->dev);
+pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(resetn);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
+IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
+IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
new file mode 100644
index 000000000..4b6685097
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017-2018 SiFive
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#define pr_fmt(fmt) "riscv-intc: " fmt
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+static struct irq_domain *intc_domain;
+
+static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
+{
+ unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
+
+ if (unlikely(cause >= BITS_PER_LONG))
+ panic("unexpected interrupt cause");
+
+ switch (cause) {
+#ifdef CONFIG_SMP
+ case RV_IRQ_SOFT:
+ /*
+ * We only use software interrupts to pass IPIs, so if a
+ * non-SMP system gets one, then we don't know what to do.
+ */
+ handle_IPI(regs);
+ break;
+#endif
+ default:
+ generic_handle_domain_irq(intc_domain, cause);
+ break;
+ }
+}
+
+/*
+ * On RISC-V systems local interrupts are masked or unmasked by writing
+ * the SIE (Supervisor Interrupt Enable) CSR. As CSRs can only be written
+ * on the local hart, these functions can only be called on the hart that
+ * corresponds to the IRQ chip.
+ */
+
+static void riscv_intc_irq_mask(struct irq_data *d)
+{
+ csr_clear(CSR_IE, BIT(d->hwirq));
+}
+
+static void riscv_intc_irq_unmask(struct irq_data *d)
+{
+ csr_set(CSR_IE, BIT(d->hwirq));
+}
+
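+/*
+ * Per-CPU bring-up/tear-down: unmask the software (IPI) interrupt when a
+ * hart comes online and mask it again when the hart goes down.
+ */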
+static int riscv_intc_cpu_starting(unsigned int cpu)
+{
+ csr_set(CSR_IE, BIT(RV_IRQ_SOFT));
+ return 0;
+}
+
+static int riscv_intc_cpu_dying(unsigned int cpu)
+{
+ csr_clear(CSR_IE, BIT(RV_IRQ_SOFT));
+ return 0;
+}
+
+static struct irq_chip riscv_intc_chip = {
+ .name = "RISC-V INTC",
+ .irq_mask = riscv_intc_irq_mask,
+ .irq_unmask = riscv_intc_irq_unmask,
+};
+
+static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops riscv_intc_domain_ops = {
+ .map = riscv_intc_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init riscv_intc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int rc;
+ unsigned long hartid;
+
+ rc = riscv_of_parent_hartid(node, &hartid);
+ if (rc < 0) {
+ pr_warn("unable to find hart id for %pOF\n", node);
+ return 0;
+ }
+
+ /*
+ * The DT will have one INTC DT node under each CPU (or HART)
+	 * DT node, so the riscv_intc_init() function will be called once
+	 * for each INTC DT node. We only need to do INTC initialization
+	 * for the INTC DT node belonging to the boot CPU (or boot HART).
+ */
+ if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
+ /*
+ * The INTC nodes of each CPU are suppliers for downstream
+ * interrupt controllers (such as PLIC, IMSIC and APLIC
+ * direct-mode) so we should mark an INTC node as initialized
+		 * if we are not creating an IRQ domain for it.
+ */
+ fwnode_dev_initialized(of_fwnode_handle(node), true);
+ return 0;
+ }
+
+ intc_domain = irq_domain_add_linear(node, BITS_PER_LONG,
+ &riscv_intc_domain_ops, NULL);
+ if (!intc_domain) {
+ pr_err("unable to add IRQ domain\n");
+ return -ENXIO;
+ }
+
+ rc = set_handle_irq(&riscv_intc_irq);
+ if (rc) {
+ pr_err("failed to set irq handler\n");
+ return rc;
+ }
+
+ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_STARTING,
+ "irqchip/riscv/intc:starting",
+ riscv_intc_cpu_starting,
+ riscv_intc_cpu_dying);
+
+ pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
new file mode 100644
index 000000000..31c202a1a
--- /dev/null
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Dmitry Eremin-Solenikov
+ * Copyright (C) 1999-2001 Nicolas Pitre
+ *
+ * Generic IRQ handling for the SA11x0.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/syscore_ops.h>
+#include <linux/irqchip/irq-sa11x0.h>
+
+#include <soc/sa1100/pwer.h>
+
+#include <asm/exception.h>
+
+#define ICIP 0x00 /* IC IRQ Pending reg. */
+#define ICMR 0x04 /* IC Mask Reg. */
+#define ICLR 0x08 /* IC Level Reg. */
+#define ICCR 0x0C /* IC Control Reg. */
+#define ICFP 0x10 /* IC FIQ Pending reg. */
+#define ICPR 0x20 /* IC Pending Reg. */
+
+static void __iomem *iobase;
+
+/*
+ * We don't need to ACK IRQs on the SA1100 unless they're GPIOs;
+ * this is for internal IRQs, i.e. from IRQ LCD to RTCAlrm.
+ */
+static void sa1100_mask_irq(struct irq_data *d)
+{
+ u32 reg;
+
+ reg = readl_relaxed(iobase + ICMR);
+ reg &= ~BIT(d->hwirq);
+ writel_relaxed(reg, iobase + ICMR);
+}
+
+static void sa1100_unmask_irq(struct irq_data *d)
+{
+ u32 reg;
+
+ reg = readl_relaxed(iobase + ICMR);
+ reg |= BIT(d->hwirq);
+ writel_relaxed(reg, iobase + ICMR);
+}
+
+static int sa1100_set_wake(struct irq_data *d, unsigned int on)
+{
+ return sa11x0_sc_set_wake(d->hwirq, on);
+}
+
+static struct irq_chip sa1100_normal_chip = {
+ .name = "SC",
+ .irq_ack = sa1100_mask_irq,
+ .irq_mask = sa1100_mask_irq,
+ .irq_unmask = sa1100_unmask_irq,
+ .irq_set_wake = sa1100_set_wake,
+};
+
+static int sa1100_normal_irqdomain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &sa1100_normal_chip,
+ handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops sa1100_normal_irqdomain_ops = {
+ .map = sa1100_normal_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static struct irq_domain *sa1100_normal_irqdomain;
+
+static struct sa1100irq_state {
+ unsigned int saved;
+ unsigned int icmr;
+ unsigned int iclr;
+ unsigned int iccr;
+} sa1100irq_state;
+
+static int sa1100irq_suspend(void)
+{
+ struct sa1100irq_state *st = &sa1100irq_state;
+
+ st->saved = 1;
+ st->icmr = readl_relaxed(iobase + ICMR);
+ st->iclr = readl_relaxed(iobase + ICLR);
+ st->iccr = readl_relaxed(iobase + ICCR);
+
+ /*
+ * Disable all GPIO-based interrupts.
+ */
+ writel_relaxed(st->icmr & 0xfffff000, iobase + ICMR);
+
+ return 0;
+}
+
+static void sa1100irq_resume(void)
+{
+ struct sa1100irq_state *st = &sa1100irq_state;
+
+ if (st->saved) {
+ writel_relaxed(st->iccr, iobase + ICCR);
+ writel_relaxed(st->iclr, iobase + ICLR);
+
+ writel_relaxed(st->icmr, iobase + ICMR);
+ }
+}
+
+static struct syscore_ops sa1100irq_syscore_ops = {
+ .suspend = sa1100irq_suspend,
+ .resume = sa1100irq_resume,
+};
+
+static int __init sa1100irq_init_devicefs(void)
+{
+ register_syscore_ops(&sa1100irq_syscore_ops);
+ return 0;
+}
+
+device_initcall(sa1100irq_init_devicefs);
+
+static asmlinkage void __exception_irq_entry
+sa1100_handle_irq(struct pt_regs *regs)
+{
+ uint32_t icip, icmr, mask;
+
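+	/*
+	 * Dispatch pending, unmasked interrupts one at a time, lowest
+	 * numbered first, until none remain.
+	 */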
+ do {
+ icip = readl_relaxed(iobase + ICIP);
+ icmr = readl_relaxed(iobase + ICMR);
+ mask = icip & icmr;
+
+ if (mask == 0)
+ break;
+
+ generic_handle_domain_irq(sa1100_normal_irqdomain,
+ ffs(mask) - 1);
+ } while (1);
+}
+
+void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start)
+{
+ iobase = ioremap(io_start, SZ_64K);
+ if (WARN_ON(!iobase))
+ return;
+
+ /* disable all IRQs */
+ writel_relaxed(0, iobase + ICMR);
+
+ /* all IRQs are IRQ, not FIQ */
+ writel_relaxed(0, iobase + ICLR);
+
+ /*
+ * Whatever the doc says, this has to be set for the wait-on-irq
+ * instruction to work... on a SA1100 rev 9 at least.
+ */
+ writel_relaxed(1, iobase + ICCR);
+
+ sa1100_normal_irqdomain = irq_domain_add_simple(NULL,
+ 32, irq_start,
+ &sa1100_normal_irqdomain_ops, NULL);
+
+ set_handle_irq(sa1100_handle_irq);
+}
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
new file mode 100644
index 000000000..2f4784860
--- /dev/null
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2018 Christoph Hellwig
+ */
+#define pr_fmt(fmt) "plic: " fmt
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <asm/smp.h>
+
+/*
+ * This driver implements a version of the RISC-V PLIC with the actual layout
+ * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
+ *
+ * https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
+ *
+ * The largest number of interrupt sources supported by devices marked as
+ * 'sifive,plic-1.0.0' is 1024, of which source 0 is defined as non-existent
+ * by the RISC-V Privileged Spec.
+ */
+
+#define MAX_DEVICES 1024
+#define MAX_CONTEXTS 15872
+
+/*
+ * Each interrupt source has a priority register associated with it.
+ * We always hardwire it to one in Linux.
+ */
+#define PRIORITY_BASE 0
+#define PRIORITY_PER_ID 4
+
+/*
+ * Each hart context has a vector of interrupt enable bits associated with it.
+ * There's one bit for each interrupt source.
+ */
+#define CONTEXT_ENABLE_BASE 0x2000
+#define CONTEXT_ENABLE_SIZE 0x80
+
+/*
+ * Each hart context has a set of control registers associated with it. Right
+ * now there's only two: a source priority threshold over which the hart will
+ * take an interrupt, and a register to claim interrupts.
+ */
+#define CONTEXT_BASE 0x200000
+#define CONTEXT_SIZE 0x1000
+#define CONTEXT_THRESHOLD 0x00
+#define CONTEXT_CLAIM 0x04
+
+#define PLIC_DISABLE_THRESHOLD 0x7
+#define PLIC_ENABLE_THRESHOLD 0
+
+#define PLIC_QUIRK_EDGE_INTERRUPT 0
+
+struct plic_priv {
+ struct cpumask lmask;
+ struct irq_domain *irqdomain;
+ void __iomem *regs;
+ unsigned long plic_quirks;
+};
+
+struct plic_handler {
+ bool present;
+ void __iomem *hart_base;
+ /*
+ * Protect mask operations on the registers given that we can't
+ * assume atomic memory operations work on them.
+ */
+ raw_spinlock_t enable_lock;
+ void __iomem *enable_base;
+ struct plic_priv *priv;
+};
+static int plic_parent_irq __ro_after_init;
+static bool plic_cpuhp_setup_done __ro_after_init;
+static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
+
+static int plic_irq_set_type(struct irq_data *d, unsigned int type);
+
+static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
+{
+ u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
+ u32 hwirq_mask = 1 << (hwirq % 32);
+
+ if (enable)
+ writel(readl(reg) | hwirq_mask, reg);
+ else
+ writel(readl(reg) & ~hwirq_mask, reg);
+}
+
+static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
+{
+ raw_spin_lock(&handler->enable_lock);
+ __plic_toggle(handler->enable_base, hwirq, enable);
+ raw_spin_unlock(&handler->enable_lock);
+}
+
+static inline void plic_irq_toggle(const struct cpumask *mask,
+ struct irq_data *d, int enable)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+ plic_toggle(handler, d->hwirq, enable);
+ }
+}
+
+static void plic_irq_enable(struct irq_data *d)
+{
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
+}
+
+static void plic_irq_disable(struct irq_data *d)
+{
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
+}
+
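+/*
+ * Masking is done by dropping the per-source priority to 0: a source with
+ * priority 0 can never exceed a hart's threshold, so it is never delivered.
+ * Unmasking restores the hardwired priority of 1.
+ */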
+static void plic_irq_unmask(struct irq_data *d)
+{
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+}
+
+static void plic_irq_mask(struct irq_data *d)
+{
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+}
+
+static void plic_irq_eoi(struct irq_data *d)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ unsigned int cpu;
+ struct cpumask amask;
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ cpumask_and(&amask, &priv->lmask, mask_val);
+
+ if (force)
+ cpu = cpumask_first(&amask);
+ else
+ cpu = cpumask_any_and(&amask, cpu_online_mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
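+	/*
+	 * Enable bits are per-hart, so retarget the interrupt by clearing
+	 * its enable bit on the old hart and, unless the interrupt is
+	 * disabled, setting it on the newly selected one.
+	 */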
+ plic_irq_disable(d);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ if (!irqd_irq_disabled(d))
+ plic_irq_enable(d);
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static struct irq_chip plic_edge_chip = {
+ .name = "SiFive PLIC",
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
+ .irq_ack = plic_irq_eoi,
+ .irq_mask = plic_irq_mask,
+ .irq_unmask = plic_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = plic_set_affinity,
+#endif
+ .irq_set_type = plic_irq_set_type,
+ .flags = IRQCHIP_AFFINITY_PRE_STARTUP,
+};
+
+static struct irq_chip plic_chip = {
+ .name = "SiFive PLIC",
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
+ .irq_mask = plic_irq_mask,
+ .irq_unmask = plic_irq_unmask,
+ .irq_eoi = plic_irq_eoi,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = plic_set_affinity,
+#endif
+ .irq_set_type = plic_irq_set_type,
+ .flags = IRQCHIP_AFFINITY_PRE_STARTUP,
+};
+
+static int plic_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
+ return IRQ_SET_MASK_OK_NOCOPY;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ irq_set_chip_handler_name_locked(d, &plic_edge_chip,
+ handle_edge_irq, NULL);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_chip_handler_name_locked(d, &plic_chip,
+ handle_fasteoi_irq, NULL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct plic_priv *priv = d->host_data;
+
+ irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
+ irq_set_noprobe(irq);
+ irq_set_affinity(irq, &priv->lmask);
+ return 0;
+}
+
+static int plic_irq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct plic_priv *priv = d->host_data;
+
+ if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
+ return irq_domain_translate_twocell(d, fwspec, hwirq, type);
+
+ return irq_domain_translate_onecell(d, fwspec, hwirq, type);
+}
+
+static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ struct irq_fwspec *fwspec = arg;
+
+ ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops plic_irqdomain_ops = {
+ .translate = plic_irq_domain_translate,
+ .alloc = plic_irq_domain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
+/*
+ * Handling an interrupt is a two-step process: first you claim the interrupt
+ * by reading the claim register, then you complete the interrupt by writing
+ * that source ID back to the same claim register. This automatically enables
+ * and disables the interrupt, so there's nothing else to do.
+ */
+static void plic_handle_irq(struct irq_desc *desc)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
+ irq_hw_number_t hwirq;
+
+ WARN_ON_ONCE(!handler->present);
+
+ chained_irq_enter(chip, desc);
+
+ while ((hwirq = readl(claim))) {
+ int err = generic_handle_domain_irq(handler->priv->irqdomain,
+ hwirq);
+ if (unlikely(err))
+ pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
+ hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
+{
+ /* priority must be > threshold to trigger an interrupt */
+ writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
+}
+
+static int plic_dying_cpu(unsigned int cpu)
+{
+ if (plic_parent_irq)
+ disable_percpu_irq(plic_parent_irq);
+
+ return 0;
+}
+
+static int plic_starting_cpu(unsigned int cpu)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ if (plic_parent_irq)
+ enable_percpu_irq(plic_parent_irq,
+ irq_get_trigger_type(plic_parent_irq));
+ else
+ pr_warn("cpu%d: parent irq not available\n", cpu);
+ plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
+
+ return 0;
+}
+
+static int __init __plic_init(struct device_node *node,
+ struct device_node *parent,
+ unsigned long plic_quirks)
+{
+ int error = 0, nr_contexts, nr_handlers = 0, i;
+	u32 nr_irqs = 0;
+ struct plic_priv *priv;
+ struct plic_handler *handler;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->plic_quirks = plic_quirks;
+
+ priv->regs = of_iomap(node, 0);
+ if (WARN_ON(!priv->regs)) {
+ error = -EIO;
+ goto out_free_priv;
+ }
+
+ error = -EINVAL;
+ of_property_read_u32(node, "riscv,ndev", &nr_irqs);
+ if (WARN_ON(!nr_irqs))
+ goto out_iounmap;
+
+ nr_contexts = of_irq_count(node);
+ if (WARN_ON(!nr_contexts))
+ goto out_iounmap;
+
+ error = -ENOMEM;
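+	/* hwirqs are 1-based (source 0 does not exist), hence the "+ 1". */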
+ priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+ &plic_irqdomain_ops, priv);
+ if (WARN_ON(!priv->irqdomain))
+ goto out_iounmap;
+
+ for (i = 0; i < nr_contexts; i++) {
+ struct of_phandle_args parent;
+ irq_hw_number_t hwirq;
+ int cpu;
+ unsigned long hartid;
+
+ if (of_irq_parse_one(node, i, &parent)) {
+ pr_err("failed to parse parent for context %d.\n", i);
+ continue;
+ }
+
+ /*
+ * Skip contexts other than external interrupts for our
+ * privilege level.
+ */
+ if (parent.args[0] != RV_IRQ_EXT) {
+ /* Disable S-mode enable bits if running in M-mode. */
+ if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
+ void __iomem *enable_base = priv->regs +
+ CONTEXT_ENABLE_BASE +
+ i * CONTEXT_ENABLE_SIZE;
+
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ __plic_toggle(enable_base, hwirq, 0);
+ }
+ continue;
+ }
+
+ error = riscv_of_parent_hartid(parent.np, &hartid);
+ if (error < 0) {
+ pr_warn("failed to parse hart ID for context %d.\n", i);
+ continue;
+ }
+
+ cpu = riscv_hartid_to_cpuid(hartid);
+ if (cpu < 0) {
+ pr_warn("Invalid cpuid for context %d\n", i);
+ continue;
+ }
+
+ /* Find parent domain and register chained handler */
+ if (!plic_parent_irq && irq_find_host(parent.np)) {
+ plic_parent_irq = irq_of_parse_and_map(node, i);
+ if (plic_parent_irq)
+ irq_set_chained_handler(plic_parent_irq,
+ plic_handle_irq);
+ }
+
+ /*
+ * When running in M-mode we need to ignore the S-mode handler.
+ * Here we assume it always comes later, but that might be a
+ * little fragile.
+ */
+ handler = per_cpu_ptr(&plic_handlers, cpu);
+ if (handler->present) {
+ pr_warn("handler already present for context %d.\n", i);
+ plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+ goto done;
+ }
+
+ cpumask_set_cpu(cpu, &priv->lmask);
+ handler->present = true;
+ handler->hart_base = priv->regs + CONTEXT_BASE +
+ i * CONTEXT_SIZE;
+ raw_spin_lock_init(&handler->enable_lock);
+ handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
+ i * CONTEXT_ENABLE_SIZE;
+ handler->priv = priv;
+done:
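+	/* Mask every source for this context and hardwire its priority to 1. */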
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
+ plic_toggle(handler, hwirq, 0);
+ writel(1, priv->regs + PRIORITY_BASE +
+ hwirq * PRIORITY_PER_ID);
+ }
+ nr_handlers++;
+ }
+
+ /*
+ * We can have multiple PLIC instances so setup cpuhp state only
+ * when context handler for current/boot CPU is present.
+ */
+ handler = this_cpu_ptr(&plic_handlers);
+ if (handler->present && !plic_cpuhp_setup_done) {
+ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ "irqchip/sifive/plic:starting",
+ plic_starting_cpu, plic_dying_cpu);
+ plic_cpuhp_setup_done = true;
+ }
+
+	pr_info("%pOFP: mapped %d interrupts with %d handlers for %d contexts.\n",
+		node, nr_irqs, nr_handlers, nr_contexts);
+ return 0;
+
+out_iounmap:
+ iounmap(priv->regs);
+out_free_priv:
+ kfree(priv);
+ return error;
+}
+
+static int __init plic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return __plic_init(node, parent, 0);
+}
+
+IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
+IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
+
+static int __init plic_edge_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT));
+}
+
+IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init);
+IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init);
diff --git a/drivers/irqchip/irq-sl28cpld.c b/drivers/irqchip/irq-sl28cpld.c
new file mode 100644
index 000000000..fbb354413
--- /dev/null
+++ b/drivers/irqchip/irq-sl28cpld.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sl28cpld interrupt controller driver
+ *
+ * Copyright 2020 Kontron Europe GmbH
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define INTC_IE 0x00
+#define INTC_IP 0x01
+
+static const struct regmap_irq sl28cpld_irqs[] = {
+ REGMAP_IRQ_REG_LINE(0, 8),
+ REGMAP_IRQ_REG_LINE(1, 8),
+ REGMAP_IRQ_REG_LINE(2, 8),
+ REGMAP_IRQ_REG_LINE(3, 8),
+ REGMAP_IRQ_REG_LINE(4, 8),
+ REGMAP_IRQ_REG_LINE(5, 8),
+ REGMAP_IRQ_REG_LINE(6, 8),
+ REGMAP_IRQ_REG_LINE(7, 8),
+};
+
+struct sl28cpld_intc {
+ struct regmap *regmap;
+ struct regmap_irq_chip chip;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+static int sl28cpld_intc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sl28cpld_intc *irqchip;
+ int irq;
+ u32 base;
+ int ret;
+
+ if (!dev->parent)
+ return -ENODEV;
+
+ irqchip = devm_kzalloc(dev, sizeof(*irqchip), GFP_KERNEL);
+ if (!irqchip)
+ return -ENOMEM;
+
+ irqchip->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!irqchip->regmap)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &base);
+ if (ret)
+ return -EINVAL;
+
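+	/*
+	 * INTC_IE is an interrupt enable register, hence mask_invert;
+	 * pending bits in INTC_IP are cleared by writing them back (ack_base).
+	 */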
+ irqchip->chip.name = "sl28cpld-intc";
+ irqchip->chip.irqs = sl28cpld_irqs;
+ irqchip->chip.num_irqs = ARRAY_SIZE(sl28cpld_irqs);
+ irqchip->chip.num_regs = 1;
+ irqchip->chip.status_base = base + INTC_IP;
+ irqchip->chip.mask_base = base + INTC_IE;
+ irqchip->chip.mask_invert = true;
+ irqchip->chip.ack_base = base + INTC_IP;
+
+ return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
+ irqchip->regmap, irq,
+ IRQF_SHARED | IRQF_ONESHOT, 0,
+ &irqchip->chip,
+ &irqchip->irq_data);
+}
+
+static const struct of_device_id sl28cpld_intc_of_match[] = {
+ { .compatible = "kontron,sl28cpld-intc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sl28cpld_intc_of_match);
+
+static struct platform_driver sl28cpld_intc_driver = {
+ .probe = sl28cpld_intc_probe,
+ .driver = {
+ .name = "sl28cpld-intc",
+ .of_match_table = sl28cpld_intc_of_match,
+ }
+};
+module_platform_driver(sl28cpld_intc_driver);
+
+MODULE_DESCRIPTION("sl28cpld Interrupt Controller Driver");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
new file mode 100644
index 000000000..c7db617e1
--- /dev/null
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Socionext External Interrupt Unit (EXIU)
+ *
+ * Copyright (c) 2017-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Based on irq-tegra.c:
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2010,2013, NVIDIA Corporation
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define NUM_IRQS 32
+
+#define EIMASK 0x00
+#define EISRCSEL 0x04
+#define EIREQSTA 0x08
+#define EIRAWREQSTA 0x0C
+#define EIREQCLR 0x10
+#define EILVL 0x14
+#define EIEDG 0x18
+#define EISIR 0x1C
+
+struct exiu_irq_data {
+ void __iomem *base;
+ u32 spi_base;
+};
+
+static void exiu_irq_ack(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(d->hwirq), data->base + EIREQCLR);
+}
+
+static void exiu_irq_eoi(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ /*
+ * Level triggered interrupts are latched and must be cleared during
+ * EOI or the interrupt will be jammed on. Of course if a level
+ * triggered interrupt is still asserted then the write will not clear
+ * the interrupt.
+ */
+ if (irqd_is_level_type(d))
+ writel(BIT(d->hwirq), data->base + EIREQCLR);
+
+ irq_chip_eoi_parent(d);
+}
+
+static void exiu_irq_mask(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EIMASK) | BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_mask_parent(d);
+}
+
+static void exiu_irq_unmask(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_unmask_parent(d);
+}
+
+static void exiu_irq_enable(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ /* clear interrupts that were latched while disabled */
+ writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
+
+ val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_enable_parent(d);
+}
+
+static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EILVL);
+ if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
+ val |= BIT(d->hwirq);
+ else
+ val &= ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EILVL);
+
+ val = readl_relaxed(data->base + EIEDG);
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) {
+ val &= ~BIT(d->hwirq);
+ irq_set_handler_locked(d, handle_fasteoi_irq);
+ } else {
+ val |= BIT(d->hwirq);
+ irq_set_handler_locked(d, handle_fasteoi_ack_irq);
+ }
+ writel_relaxed(val, data->base + EIEDG);
+
+ writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
+
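+	/* The parent GIC input is always a latched, active-high level. */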
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip exiu_irq_chip = {
+ .name = "EXIU",
+ .irq_ack = exiu_irq_ack,
+ .irq_eoi = exiu_irq_eoi,
+ .irq_enable = exiu_irq_enable,
+ .irq_mask = exiu_irq_mask,
+ .irq_unmask = exiu_irq_unmask,
+ .irq_set_type = exiu_irq_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_EOI_THREADED |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int exiu_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct exiu_irq_data *info = domain->host_data;
+
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ *hwirq = fwspec->param[1] - info->spi_base;
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ } else {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ }
+ return 0;
+}
+
+static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct exiu_irq_data *info = dom->host_data;
+ irq_hw_number_t hwirq;
+
+ parent_fwspec = *fwspec;
+ if (is_of_node(dom->parent->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ hwirq = fwspec->param[1] - info->spi_base;
+ } else {
+ hwirq = fwspec->param[0];
+ parent_fwspec.param[0] = hwirq + info->spi_base + 32;
+ }
+ WARN_ON(nr_irqs != 1);
+ irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info);
+
+ parent_fwspec.fwnode = dom->parent->fwnode;
+ return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec);
+}
+
+static const struct irq_domain_ops exiu_domain_ops = {
+ .translate = exiu_domain_translate,
+ .alloc = exiu_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static struct exiu_irq_data *exiu_init(const struct fwnode_handle *fwnode,
+ struct resource *res)
+{
+ struct exiu_irq_data *data;
+ int err;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ if (fwnode_property_read_u32_array(fwnode, "socionext,spi-base",
+ &data->spi_base, 1)) {
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ data->base = ioremap(res->start, resource_size(res));
+ if (!data->base) {
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ /* clear and mask all interrupts */
+ writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR);
+ writel_relaxed(0xFFFFFFFF, data->base + EIMASK);
+
+ return data;
+
+out_free:
+ kfree(data);
+ return ERR_PTR(err);
+}
+
+static int __init exiu_dt_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct exiu_irq_data *data;
+ struct resource res;
+
+ if (!parent) {
+ pr_err("%pOF: no parent, giving up\n", node);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("%pOF: failed to parse memory resource\n", node);
+ return -ENXIO;
+ }
+
+ data = exiu_init(of_node_to_fwnode(node), &res);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
+ &exiu_domain_ops, data);
+ if (!domain) {
+ pr_err("%pOF: failed to allocate domain\n", node);
+ goto out_unmap;
+ }
+
+ pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS,
+ parent);
+
+ return 0;
+
+out_unmap:
+ iounmap(data->base);
+ kfree(data);
+ return -ENOMEM;
+}
+IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_dt_init);
+
+#ifdef CONFIG_ACPI
+static int exiu_acpi_probe(struct platform_device *pdev)
+{
+ struct irq_domain *domain;
+ struct exiu_irq_data *data;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to parse memory resource\n");
+ return -ENXIO;
+ }
+
+ data = exiu_init(dev_fwnode(&pdev->dev), res);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ domain = acpi_irq_create_hierarchy(0, NUM_IRQS, dev_fwnode(&pdev->dev),
+ &exiu_domain_ops, data);
+ if (!domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ goto out_unmap;
+ }
+
+ dev_info(&pdev->dev, "%d interrupts forwarded\n", NUM_IRQS);
+
+ return 0;
+
+out_unmap:
+ iounmap(data->base);
+ kfree(data);
+ return -ENOMEM;
+}
+
+static const struct acpi_device_id exiu_acpi_ids[] = {
+ { "SCX0008" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, exiu_acpi_ids);
+
+static struct platform_driver exiu_driver = {
+ .driver = {
+ .name = "exiu",
+ .acpi_match_table = exiu_acpi_ids,
+ },
+ .probe = exiu_acpi_probe,
+};
+builtin_platform_driver(exiu_driver);
+#endif
diff --git a/drivers/irqchip/irq-sp7021-intc.c b/drivers/irqchip/irq-sp7021-intc.c
new file mode 100644
index 000000000..bed78d1de
--- /dev/null
+++ b/drivers/irqchip/irq-sp7021-intc.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define SP_INTC_HWIRQ_MIN 0
+#define SP_INTC_HWIRQ_MAX 223
+
+#define SP_INTC_NR_IRQS (SP_INTC_HWIRQ_MAX - SP_INTC_HWIRQ_MIN + 1)
+#define SP_INTC_NR_GROUPS DIV_ROUND_UP(SP_INTC_NR_IRQS, 32)
+#define SP_INTC_REG_SIZE (SP_INTC_NR_GROUPS * 4)
+
+/* REG_GROUP_0 regs */
+#define REG_INTR_TYPE (sp_intc.g0)
+#define REG_INTR_POLARITY (REG_INTR_TYPE + SP_INTC_REG_SIZE)
+#define REG_INTR_PRIORITY (REG_INTR_POLARITY + SP_INTC_REG_SIZE)
+#define REG_INTR_MASK (REG_INTR_PRIORITY + SP_INTC_REG_SIZE)
+
+/* REG_GROUP_1 regs */
+#define REG_INTR_CLEAR (sp_intc.g1)
+#define REG_MASKED_EXT1 (REG_INTR_CLEAR + SP_INTC_REG_SIZE)
+#define REG_MASKED_EXT0 (REG_MASKED_EXT1 + SP_INTC_REG_SIZE)
+#define REG_INTR_GROUP (REG_INTR_CLEAR + 31 * 4)
+
+#define GROUP_MASK (BIT(SP_INTC_NR_GROUPS) - 1)
+#define GROUP_SHIFT_EXT1 (0)
+#define GROUP_SHIFT_EXT0 (8)
+
+/*
+ * When GPIO_INT0~7 are set to edge trigger, they don't work properly.
+ * WORKAROUND: change them to level trigger, and toggle the polarity
+ * at ACK/handler time to make the HW work.
+ */
+#define GPIO_INT0_HWIRQ 120
+#define GPIO_INT7_HWIRQ 127
+#define IS_GPIO_INT(irq) \
+({ \
+ u32 i = irq; \
+ (i >= GPIO_INT0_HWIRQ) && (i <= GPIO_INT7_HWIRQ); \
+})
+
+/* index of states */
+enum {
+ _IS_EDGE = 0,
+ _IS_LOW,
+ _IS_ACTIVE
+};
+
+#define STATE_BIT(irq, idx) (((irq) - GPIO_INT0_HWIRQ) * 3 + (idx))
+#define ASSIGN_STATE(irq, idx, v) assign_bit(STATE_BIT(irq, idx), sp_intc.states, v)
+#define TEST_STATE(irq, idx) test_bit(STATE_BIT(irq, idx), sp_intc.states)
+
+static struct sp_intctl {
+ /*
+ * REG_GROUP_0: include type/polarity/priority/mask regs.
+ * REG_GROUP_1: include clear/masked_ext0/masked_ext1/group regs.
+ */
+ void __iomem *g0; // REG_GROUP_0 base
+ void __iomem *g1; // REG_GROUP_1 base
+
+ struct irq_domain *domain;
+ raw_spinlock_t lock;
+
+ /*
+ * store GPIO_INT states
+ * each interrupt has 3 states: is_edge, is_low, is_active
+ */
+ DECLARE_BITMAP(states, (GPIO_INT7_HWIRQ - GPIO_INT0_HWIRQ + 1) * 3);
+} sp_intc;
+
+static struct irq_chip sp_intc_chip;
+
+static void sp_intc_assign_bit(u32 hwirq, void __iomem *base, bool value)
+{
+ u32 offset, mask;
+ unsigned long flags;
+ void __iomem *reg;
+
+ offset = (hwirq / 32) * 4;
+ reg = base + offset;
+
+ raw_spin_lock_irqsave(&sp_intc.lock, flags);
+ mask = readl_relaxed(reg);
+ if (value)
+ mask |= BIT(hwirq % 32);
+ else
+ mask &= ~BIT(hwirq % 32);
+ writel_relaxed(mask, reg);
+ raw_spin_unlock_irqrestore(&sp_intc.lock, flags);
+}
+
+static void sp_intc_ack_irq(struct irq_data *d)
+{
+ u32 hwirq = d->hwirq;
+
+ if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_EDGE))) { // WORKAROUND
+ sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, !TEST_STATE(hwirq, _IS_LOW));
+ ASSIGN_STATE(hwirq, _IS_ACTIVE, true);
+ }
+
+ sp_intc_assign_bit(hwirq, REG_INTR_CLEAR, 1);
+}
+
+static void sp_intc_mask_irq(struct irq_data *d)
+{
+ sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 0);
+}
+
+static void sp_intc_unmask_irq(struct irq_data *d)
+{
+ sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 1);
+}
+
+static int sp_intc_set_type(struct irq_data *d, unsigned int type)
+{
+ u32 hwirq = d->hwirq;
+ bool is_edge = !(type & IRQ_TYPE_LEVEL_MASK);
+ bool is_low = (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING);
+
+ irq_set_handler_locked(d, is_edge ? handle_edge_irq : handle_level_irq);
+
+ if (unlikely(IS_GPIO_INT(hwirq) && is_edge)) { // WORKAROUND
+ /* store states */
+ ASSIGN_STATE(hwirq, _IS_EDGE, is_edge);
+ ASSIGN_STATE(hwirq, _IS_LOW, is_low);
+ ASSIGN_STATE(hwirq, _IS_ACTIVE, false);
+ /* change to level */
+ is_edge = false;
+ }
+
+ sp_intc_assign_bit(hwirq, REG_INTR_TYPE, is_edge);
+ sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, is_low);
+
+ return 0;
+}
+
+static int sp_intc_get_ext_irq(int ext_num)
+{
+ void __iomem *base = ext_num ? REG_MASKED_EXT1 : REG_MASKED_EXT0;
+ u32 shift = ext_num ? GROUP_SHIFT_EXT1 : GROUP_SHIFT_EXT0;
+ u32 groups;
+ u32 pending_group;
+ u32 group;
+ u32 pending_irq;
+
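+	/*
+	 * Pick the highest-numbered pending group, then the highest-numbered
+	 * pending interrupt within that group.
+	 */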
+ groups = readl_relaxed(REG_INTR_GROUP);
+ pending_group = (groups >> shift) & GROUP_MASK;
+ if (!pending_group)
+ return -1;
+
+ group = fls(pending_group) - 1;
+ pending_irq = readl_relaxed(base + group * 4);
+ if (!pending_irq)
+ return -1;
+
+ return (group * 32) + fls(pending_irq) - 1;
+}
+
+static void sp_intc_handle_ext_cascaded(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int ext_num = (uintptr_t)irq_desc_get_handler_data(desc);
+ int hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ while ((hwirq = sp_intc_get_ext_irq(ext_num)) >= 0) {
+ if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_ACTIVE))) { // WORKAROUND
+ ASSIGN_STATE(hwirq, _IS_ACTIVE, false);
+ sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, TEST_STATE(hwirq, _IS_LOW));
+ } else {
+ generic_handle_domain_irq(sp_intc.domain, hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip sp_intc_chip = {
+ .name = "sp_intc",
+ .irq_ack = sp_intc_ack_irq,
+ .irq_mask = sp_intc_mask_irq,
+ .irq_unmask = sp_intc_unmask_irq,
+ .irq_set_type = sp_intc_set_type,
+};
+
+static int sp_intc_irq_domain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &sp_intc_chip, handle_level_irq);
+ irq_set_chip_data(irq, &sp_intc_chip);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops sp_intc_dm_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .map = sp_intc_irq_domain_map,
+};
+
+static int sp_intc_irq_map(struct device_node *node, int i)
+{
+ unsigned int irq;
+
+ irq = irq_of_parse_and_map(node, i);
+ if (!irq)
+ return -ENOENT;
+
+ irq_set_chained_handler_and_data(irq, sp_intc_handle_ext_cascaded, (void *)(uintptr_t)i);
+
+ return 0;
+}
+
+static int __init sp_intc_init_dt(struct device_node *node, struct device_node *parent)
+{
+ int i, ret;
+
+ sp_intc.g0 = of_iomap(node, 0);
+ if (!sp_intc.g0)
+ return -ENXIO;
+
+ sp_intc.g1 = of_iomap(node, 1);
+ if (!sp_intc.g1) {
+ ret = -ENXIO;
+ goto out_unmap0;
+ }
+
+ ret = sp_intc_irq_map(node, 0); // EXT_INT0
+ if (ret)
+ goto out_unmap1;
+
+ ret = sp_intc_irq_map(node, 1); // EXT_INT1
+ if (ret)
+ goto out_unmap1;
+
+ /* initial regs */
+ for (i = 0; i < SP_INTC_NR_GROUPS; i++) {
+ /* all mask */
+ writel_relaxed(0, REG_INTR_MASK + i * 4);
+ /* all edge */
+ writel_relaxed(~0, REG_INTR_TYPE + i * 4);
+ /* all high-active */
+ writel_relaxed(0, REG_INTR_POLARITY + i * 4);
+ /* all EXT_INT0 */
+ writel_relaxed(~0, REG_INTR_PRIORITY + i * 4);
+ /* all clear */
+ writel_relaxed(~0, REG_INTR_CLEAR + i * 4);
+ }
+
+ sp_intc.domain = irq_domain_add_linear(node, SP_INTC_NR_IRQS,
+ &sp_intc_dm_ops, &sp_intc);
+ if (!sp_intc.domain) {
+ ret = -ENOMEM;
+ goto out_unmap1;
+ }
+
+ raw_spin_lock_init(&sp_intc.lock);
+
+ return 0;
+
+out_unmap1:
+ iounmap(sp_intc.g1);
+out_unmap0:
+ iounmap(sp_intc.g0);
+
+ return ret;
+}
+
+IRQCHIP_DECLARE(sp_intc, "sunplus,sp7021-intc", sp_intc_init_dt);
diff --git a/drivers/irqchip/irq-st.c b/drivers/irqchip/irq-st.c
new file mode 100644
index 000000000..801551e46
--- /dev/null
+++ b/drivers/irqchip/irq-st.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 STMicroelectronics – All Rights Reserved
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ *
+ * This is a re-write of Christophe Kerello's PMU driver.
+ */
+
+#include <dt-bindings/interrupt-controller/irq-st.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define STIH415_SYSCFG_642 0x0a8
+#define STIH416_SYSCFG_7543 0x87c
+#define STIH407_SYSCFG_5102 0x198
+#define STID127_SYSCFG_734 0x088
+
+#define ST_A9_IRQ_MASK 0x001FFFFF
+#define ST_A9_IRQ_MAX_CHANS 2
+
+#define ST_A9_IRQ_EN_CTI_0 BIT(0)
+#define ST_A9_IRQ_EN_CTI_1 BIT(1)
+#define ST_A9_IRQ_EN_PMU_0 BIT(2)
+#define ST_A9_IRQ_EN_PMU_1 BIT(3)
+#define ST_A9_IRQ_EN_PL310_L2 BIT(4)
+#define ST_A9_IRQ_EN_EXT_0 BIT(5)
+#define ST_A9_IRQ_EN_EXT_1 BIT(6)
+#define ST_A9_IRQ_EN_EXT_2 BIT(7)
+
+#define ST_A9_FIQ_N_SEL(dev, chan) (dev << (8 + (chan * 3)))
+#define ST_A9_IRQ_N_SEL(dev, chan) (dev << (14 + (chan * 3)))
+#define ST_A9_EXTIRQ_INV_SEL(dev) (dev << 20)
+
+struct st_irq_syscfg {
+ struct regmap *regmap;
+ unsigned int syscfg;
+ unsigned int config;
+ bool ext_inverted;
+};
+
+static const struct of_device_id st_irq_syscfg_match[] = {
+ {
+ .compatible = "st,stih415-irq-syscfg",
+ .data = (void *)STIH415_SYSCFG_642,
+ },
+ {
+ .compatible = "st,stih416-irq-syscfg",
+ .data = (void *)STIH416_SYSCFG_7543,
+ },
+ {
+ .compatible = "st,stih407-irq-syscfg",
+ .data = (void *)STIH407_SYSCFG_5102,
+ },
+ {
+ .compatible = "st,stid127-irq-syscfg",
+ .data = (void *)STID127_SYSCFG_734,
+ },
+ {}
+};
+
+static int st_irq_xlate(struct platform_device *pdev,
+ int device, int channel, bool irq)
+{
+ struct st_irq_syscfg *ddata = dev_get_drvdata(&pdev->dev);
+
+ /* Set the device enable bit. */
+ switch (device) {
+ case ST_IRQ_SYSCFG_EXT_0:
+ ddata->config |= ST_A9_IRQ_EN_EXT_0;
+ break;
+ case ST_IRQ_SYSCFG_EXT_1:
+ ddata->config |= ST_A9_IRQ_EN_EXT_1;
+ break;
+ case ST_IRQ_SYSCFG_EXT_2:
+ ddata->config |= ST_A9_IRQ_EN_EXT_2;
+ break;
+ case ST_IRQ_SYSCFG_CTI_0:
+ ddata->config |= ST_A9_IRQ_EN_CTI_0;
+ break;
+ case ST_IRQ_SYSCFG_CTI_1:
+ ddata->config |= ST_A9_IRQ_EN_CTI_1;
+ break;
+ case ST_IRQ_SYSCFG_PMU_0:
+ ddata->config |= ST_A9_IRQ_EN_PMU_0;
+ break;
+ case ST_IRQ_SYSCFG_PMU_1:
+ ddata->config |= ST_A9_IRQ_EN_PMU_1;
+ break;
+ case ST_IRQ_SYSCFG_pl310_L2:
+ ddata->config |= ST_A9_IRQ_EN_PL310_L2;
+ break;
+ case ST_IRQ_SYSCFG_DISABLED:
+ return 0;
+ default:
+ dev_err(&pdev->dev, "Unrecognised device %d\n", device);
+ return -EINVAL;
+ }
+
+ /* Select IRQ/FIQ channel for device. */
+ ddata->config |= irq ?
+ ST_A9_IRQ_N_SEL(device, channel) :
+ ST_A9_FIQ_N_SEL(device, channel);
+
+ return 0;
+}
+
+static int st_irq_syscfg_enable(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct st_irq_syscfg *ddata = dev_get_drvdata(&pdev->dev);
+ int channels, ret, i;
+ u32 device, invert;
+
+ channels = of_property_count_u32_elems(np, "st,irq-device");
+ if (channels != ST_A9_IRQ_MAX_CHANS) {
+		dev_err(&pdev->dev, "st,irq-device must have 2 elems\n");
+ return -EINVAL;
+ }
+
+ channels = of_property_count_u32_elems(np, "st,fiq-device");
+ if (channels != ST_A9_IRQ_MAX_CHANS) {
+		dev_err(&pdev->dev, "st,fiq-device must have 2 elems\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ST_A9_IRQ_MAX_CHANS; i++) {
+ of_property_read_u32_index(np, "st,irq-device", i, &device);
+
+ ret = st_irq_xlate(pdev, device, i, true);
+ if (ret)
+ return ret;
+
+ of_property_read_u32_index(np, "st,fiq-device", i, &device);
+
+ ret = st_irq_xlate(pdev, device, i, false);
+ if (ret)
+ return ret;
+ }
+
+ /* External IRQs may be inverted. */
+ of_property_read_u32(np, "st,invert-ext", &invert);
+ ddata->config |= ST_A9_EXTIRQ_INV_SEL(invert);
+
+ return regmap_update_bits(ddata->regmap, ddata->syscfg,
+ ST_A9_IRQ_MASK, ddata->config);
+}
+
+static int st_irq_syscfg_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct st_irq_syscfg *ddata;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ match = of_match_device(st_irq_syscfg_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ ddata->syscfg = (unsigned int)match->data;
+
+ ddata->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(ddata->regmap)) {
+ dev_err(&pdev->dev, "syscfg phandle missing\n");
+ return PTR_ERR(ddata->regmap);
+ }
+
+ dev_set_drvdata(&pdev->dev, ddata);
+
+ return st_irq_syscfg_enable(pdev);
+}
+
+static int __maybe_unused st_irq_syscfg_resume(struct device *dev)
+{
+ struct st_irq_syscfg *ddata = dev_get_drvdata(dev);
+
+ return regmap_update_bits(ddata->regmap, ddata->syscfg,
+ ST_A9_IRQ_MASK, ddata->config);
+}
+
+static SIMPLE_DEV_PM_OPS(st_irq_syscfg_pm_ops, NULL, st_irq_syscfg_resume);
+
+static struct platform_driver st_irq_syscfg_driver = {
+ .driver = {
+ .name = "st_irq_syscfg",
+ .pm = &st_irq_syscfg_pm_ops,
+ .of_match_table = st_irq_syscfg_match,
+ },
+ .probe = st_irq_syscfg_probe,
+};
+
+static int __init st_irq_syscfg_init(void)
+{
+ return platform_driver_register(&st_irq_syscfg_driver);
+}
+core_initcall(st_irq_syscfg_init);
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
new file mode 100644
index 000000000..dc6f67dec
--- /dev/null
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -0,0 +1,1027 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Copyright (C) STMicroelectronics 2017
+ * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define IRQS_PER_BANK 32
+
+#define HWSPNLCK_TIMEOUT 1000 /* usec */
+
+struct stm32_exti_bank {
+ u32 imr_ofst;
+ u32 emr_ofst;
+ u32 rtsr_ofst;
+ u32 ftsr_ofst;
+ u32 swier_ofst;
+ u32 rpr_ofst;
+ u32 fpr_ofst;
+ u32 trg_ofst;
+};
+
+#define UNDEF_REG ~0
+
+struct stm32_exti_drv_data {
+ const struct stm32_exti_bank **exti_banks;
+ const u8 *desc_irqs;
+ u32 bank_nr;
+};
+
+struct stm32_exti_chip_data {
+ struct stm32_exti_host_data *host_data;
+ const struct stm32_exti_bank *reg_bank;
+ struct raw_spinlock rlock;
+ u32 wake_active;
+ u32 mask_cache;
+ u32 rtsr_cache;
+ u32 ftsr_cache;
+};
+
+struct stm32_exti_host_data {
+ void __iomem *base;
+ struct stm32_exti_chip_data *chips_data;
+ const struct stm32_exti_drv_data *drv_data;
+ struct hwspinlock *hwlock;
+};
+
+static struct stm32_exti_host_data *stm32_host_data;
+
+static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
+ .imr_ofst = 0x00,
+ .emr_ofst = 0x04,
+ .rtsr_ofst = 0x08,
+ .ftsr_ofst = 0x0C,
+ .swier_ofst = 0x10,
+ .rpr_ofst = 0x14,
+ .fpr_ofst = UNDEF_REG,
+ .trg_ofst = UNDEF_REG,
+};
+
+static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
+ &stm32f4xx_exti_b1,
+};
+
+static const struct stm32_exti_drv_data stm32f4xx_drv_data = {
+ .exti_banks = stm32f4xx_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32f4xx_exti_banks),
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
+ .imr_ofst = 0x80,
+ .emr_ofst = 0x84,
+ .rtsr_ofst = 0x00,
+ .ftsr_ofst = 0x04,
+ .swier_ofst = 0x08,
+ .rpr_ofst = 0x88,
+ .fpr_ofst = UNDEF_REG,
+ .trg_ofst = UNDEF_REG,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
+ .imr_ofst = 0x90,
+ .emr_ofst = 0x94,
+ .rtsr_ofst = 0x20,
+ .ftsr_ofst = 0x24,
+ .swier_ofst = 0x28,
+ .rpr_ofst = 0x98,
+ .fpr_ofst = UNDEF_REG,
+ .trg_ofst = UNDEF_REG,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
+ .imr_ofst = 0xA0,
+ .emr_ofst = 0xA4,
+ .rtsr_ofst = 0x40,
+ .ftsr_ofst = 0x44,
+ .swier_ofst = 0x48,
+ .rpr_ofst = 0xA8,
+ .fpr_ofst = UNDEF_REG,
+ .trg_ofst = UNDEF_REG,
+};
+
+static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
+ &stm32h7xx_exti_b1,
+ &stm32h7xx_exti_b2,
+ &stm32h7xx_exti_b3,
+};
+
+static const struct stm32_exti_drv_data stm32h7xx_drv_data = {
+ .exti_banks = stm32h7xx_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32h7xx_exti_banks),
+};
+
+static const struct stm32_exti_bank stm32mp1_exti_b1 = {
+ .imr_ofst = 0x80,
+ .emr_ofst = UNDEF_REG,
+ .rtsr_ofst = 0x00,
+ .ftsr_ofst = 0x04,
+ .swier_ofst = 0x08,
+ .rpr_ofst = 0x0C,
+ .fpr_ofst = 0x10,
+ .trg_ofst = 0x3EC,
+};
+
+static const struct stm32_exti_bank stm32mp1_exti_b2 = {
+ .imr_ofst = 0x90,
+ .emr_ofst = UNDEF_REG,
+ .rtsr_ofst = 0x20,
+ .ftsr_ofst = 0x24,
+ .swier_ofst = 0x28,
+ .rpr_ofst = 0x2C,
+ .fpr_ofst = 0x30,
+ .trg_ofst = 0x3E8,
+};
+
+static const struct stm32_exti_bank stm32mp1_exti_b3 = {
+ .imr_ofst = 0xA0,
+ .emr_ofst = UNDEF_REG,
+ .rtsr_ofst = 0x40,
+ .ftsr_ofst = 0x44,
+ .swier_ofst = 0x48,
+ .rpr_ofst = 0x4C,
+ .fpr_ofst = 0x50,
+ .trg_ofst = 0x3E4,
+};
+
+static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
+ &stm32mp1_exti_b1,
+ &stm32mp1_exti_b2,
+ &stm32mp1_exti_b3,
+};
+
+static struct irq_chip stm32_exti_h_chip;
+static struct irq_chip stm32_exti_h_chip_direct;
+
+#define EXTI_INVALID_IRQ U8_MAX
+#define STM32MP1_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK)
+
+/*
+ * Use some intentionally tricky logic here to initialize the whole array to
+ * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate
+ * that we "know" that there are overrides in this structure, and we'll need to
+ * disable that warning from W=1 builds.
+ */
+__diag_push();
+__diag_ignore_all("-Woverride-init",
+ "logic to initialize all and then override some is OK");
+
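+/*
+ * Per EXTI line, the parent interrupt the event is routed to;
+ * EXTI_INVALID_IRQ marks lines without a dedicated parent interrupt.
+ */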
+static const u8 stm32mp1_desc_irq[] = {
+ /* default value */
+ [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
+
+ [0] = 6,
+ [1] = 7,
+ [2] = 8,
+ [3] = 9,
+ [4] = 10,
+ [5] = 23,
+ [6] = 64,
+ [7] = 65,
+ [8] = 66,
+ [9] = 67,
+ [10] = 40,
+ [11] = 42,
+ [12] = 76,
+ [13] = 77,
+ [14] = 121,
+ [15] = 127,
+ [16] = 1,
+ [19] = 3,
+ [21] = 31,
+ [22] = 33,
+ [23] = 72,
+ [24] = 95,
+ [25] = 107,
+ [26] = 37,
+ [27] = 38,
+ [28] = 39,
+ [29] = 71,
+ [30] = 52,
+ [31] = 53,
+ [32] = 82,
+ [33] = 83,
+ [47] = 93,
+ [48] = 138,
+ [50] = 139,
+ [52] = 140,
+ [53] = 141,
+ [54] = 135,
+ [61] = 100,
+ [65] = 144,
+ [68] = 143,
+ [70] = 62,
+ [73] = 129,
+};
+
+static const u8 stm32mp13_desc_irq[] = {
+ /* default value */
+ [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
+
+ [0] = 6,
+ [1] = 7,
+ [2] = 8,
+ [3] = 9,
+ [4] = 10,
+ [5] = 24,
+ [6] = 65,
+ [7] = 66,
+ [8] = 67,
+ [9] = 68,
+ [10] = 41,
+ [11] = 43,
+ [12] = 77,
+ [13] = 78,
+ [14] = 106,
+ [15] = 109,
+ [16] = 1,
+ [19] = 3,
+ [21] = 32,
+ [22] = 34,
+ [23] = 73,
+ [24] = 93,
+ [25] = 114,
+ [26] = 38,
+ [27] = 39,
+ [28] = 40,
+ [29] = 72,
+ [30] = 53,
+ [31] = 54,
+ [32] = 83,
+ [33] = 84,
+ [44] = 96,
+ [47] = 92,
+ [48] = 116,
+ [50] = 117,
+ [52] = 118,
+ [53] = 119,
+ [68] = 63,
+ [70] = 98,
+};
+
+__diag_pop();
+
+static const struct stm32_exti_drv_data stm32mp1_drv_data = {
+ .exti_banks = stm32mp1_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
+ .desc_irqs = stm32mp1_desc_irq,
+};
+
+static const struct stm32_exti_drv_data stm32mp13_drv_data = {
+ .exti_banks = stm32mp1_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
+ .desc_irqs = stm32mp13_desc_irq,
+};
+
+static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
+{
+ struct stm32_exti_chip_data *chip_data = gc->private;
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+ unsigned long pending;
+
+ pending = irq_reg_readl(gc, stm32_bank->rpr_ofst);
+ if (stm32_bank->fpr_ofst != UNDEF_REG)
+ pending |= irq_reg_readl(gc, stm32_bank->fpr_ofst);
+
+ return pending;
+}
+
+static void stm32_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int nbanks = domain->gc->num_chips;
+ struct irq_chip_generic *gc;
+ unsigned long pending;
+ int n, i, irq_base = 0;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) {
+ gc = irq_get_domain_generic_chip(domain, irq_base);
+
+ while ((pending = stm32_exti_pending(gc))) {
+ for_each_set_bit(n, &pending, IRQS_PER_BANK)
+ generic_handle_domain_irq(domain, irq_base + n);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int stm32_exti_set_type(struct irq_data *d,
+ unsigned int type, u32 *rtsr, u32 *ftsr)
+{
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ *rtsr |= mask;
+ *ftsr &= ~mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ *rtsr &= ~mask;
+ *ftsr |= mask;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ *rtsr |= mask;
+ *ftsr |= mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct stm32_exti_chip_data *chip_data = gc->private;
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+ struct hwspinlock *hwlock = chip_data->host_data->hwlock;
+ u32 rtsr, ftsr;
+ int err;
+
+ irq_gc_lock(gc);
+
+ if (hwlock) {
+ err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
+ if (err) {
+ pr_err("%s can't get hwspinlock (%d)\n", __func__, err);
+ goto unlock;
+ }
+ }
+
+ rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
+ ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
+
+ err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
+ if (err)
+ goto unspinlock;
+
+ irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
+ irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
+
+unspinlock:
+ if (hwlock)
+ hwspin_unlock_in_atomic(hwlock);
+unlock:
+ irq_gc_unlock(gc);
+
+ return err;
+}
+
+static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data,
+ u32 wake_active)
+{
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+
+ /* save rtsr, ftsr registers */
+ chip_data->rtsr_cache = readl_relaxed(base + stm32_bank->rtsr_ofst);
+ chip_data->ftsr_cache = readl_relaxed(base + stm32_bank->ftsr_ofst);
+
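+	/* Leave only the wakeup-enabled lines unmasked while suspended. */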
+ writel_relaxed(wake_active, base + stm32_bank->imr_ofst);
+}
+
+static void stm32_chip_resume(struct stm32_exti_chip_data *chip_data,
+ u32 mask_cache)
+{
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+
+	/* restore rtsr, ftsr registers */
+ writel_relaxed(chip_data->rtsr_cache, base + stm32_bank->rtsr_ofst);
+ writel_relaxed(chip_data->ftsr_cache, base + stm32_bank->ftsr_ofst);
+
+ writel_relaxed(mask_cache, base + stm32_bank->imr_ofst);
+}
+
+static void stm32_irq_suspend(struct irq_chip_generic *gc)
+{
+ struct stm32_exti_chip_data *chip_data = gc->private;
+
+ irq_gc_lock(gc);
+ stm32_chip_suspend(chip_data, gc->wake_active);
+ irq_gc_unlock(gc);
+}
+
+static void stm32_irq_resume(struct irq_chip_generic *gc)
+{
+ struct stm32_exti_chip_data *chip_data = gc->private;
+
+ irq_gc_lock(gc);
+ stm32_chip_resume(chip_data, gc->mask_cache);
+ irq_gc_unlock(gc);
+}
+
+static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ irq_hw_number_t hwirq;
+
+ hwirq = fwspec->param[0];
+
+ irq_map_generic_chip(d, virq, hwirq);
+
+ return 0;
+}
+
+static void stm32_exti_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(d, virq);
+
+ irq_domain_reset_irq_data(data);
+}
+
+static const struct irq_domain_ops irq_exti_domain_ops = {
+ .map = irq_map_generic_chip,
+ .alloc = stm32_exti_alloc,
+ .free = stm32_exti_free,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void stm32_irq_ack(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct stm32_exti_chip_data *chip_data = gc->private;
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+
+ irq_gc_lock(gc);
+
+ irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
+ if (stm32_bank->fpr_ofst != UNDEF_REG)
+ irq_reg_writel(gc, d->mask, stm32_bank->fpr_ofst);
+
+ irq_gc_unlock(gc);
+}
+
+/* directly set the target bit without reading first. */
+static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(val, base + reg);
+}
+
+static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val;
+
+ val = readl_relaxed(base + reg);
+ val |= BIT(d->hwirq % IRQS_PER_BANK);
+ writel_relaxed(val, base + reg);
+
+ return val;
+}
+
+static inline u32 stm32_exti_clr_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val;
+
+ val = readl_relaxed(base + reg);
+ val &= ~BIT(d->hwirq % IRQS_PER_BANK);
+ writel_relaxed(val, base + reg);
+
+ return val;
+}
+
+static void stm32_exti_h_eoi(struct irq_data *d)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+
+ raw_spin_lock(&chip_data->rlock);
+
+ stm32_exti_write_bit(d, stm32_bank->rpr_ofst);
+ if (stm32_bank->fpr_ofst != UNDEF_REG)
+ stm32_exti_write_bit(d, stm32_bank->fpr_ofst);
+
+ raw_spin_unlock(&chip_data->rlock);
+
+ if (d->parent_data->chip)
+ irq_chip_eoi_parent(d);
+}
+
+static void stm32_exti_h_mask(struct irq_data *d)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+
+ raw_spin_lock(&chip_data->rlock);
+ chip_data->mask_cache = stm32_exti_clr_bit(d, stm32_bank->imr_ofst);
+ raw_spin_unlock(&chip_data->rlock);
+
+ if (d->parent_data->chip)
+ irq_chip_mask_parent(d);
+}
+
+static void stm32_exti_h_unmask(struct irq_data *d)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+
+ raw_spin_lock(&chip_data->rlock);
+ chip_data->mask_cache = stm32_exti_set_bit(d, stm32_bank->imr_ofst);
+ raw_spin_unlock(&chip_data->rlock);
+
+ if (d->parent_data->chip)
+ irq_chip_unmask_parent(d);
+}
+
+static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+ struct hwspinlock *hwlock = chip_data->host_data->hwlock;
+ void __iomem *base = chip_data->host_data->base;
+ u32 rtsr, ftsr;
+ int err;
+
+ raw_spin_lock(&chip_data->rlock);
+
+ if (hwlock) {
+ err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
+ if (err) {
+ pr_err("%s can't get hwspinlock (%d)\n", __func__, err);
+ goto unlock;
+ }
+ }
+
+ rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst);
+ ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst);
+
+ err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
+ if (err)
+ goto unspinlock;
+
+ writel_relaxed(rtsr, base + stm32_bank->rtsr_ofst);
+ writel_relaxed(ftsr, base + stm32_bank->ftsr_ofst);
+
+unspinlock:
+ if (hwlock)
+ hwspin_unlock_in_atomic(hwlock);
+unlock:
+ raw_spin_unlock(&chip_data->rlock);
+
+ return err;
+}
+
+static int stm32_exti_h_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ raw_spin_lock(&chip_data->rlock);
+
+ if (on)
+ chip_data->wake_active |= mask;
+ else
+ chip_data->wake_active &= ~mask;
+
+ raw_spin_unlock(&chip_data->rlock);
+
+ return 0;
+}
+
+static int stm32_exti_h_set_affinity(struct irq_data *d,
+ const struct cpumask *dest, bool force)
+{
+ if (d->parent_data->chip)
+ return irq_chip_set_affinity_parent(d, dest, force);
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static int __maybe_unused stm32_exti_h_suspend(void)
+{
+ struct stm32_exti_chip_data *chip_data;
+ int i;
+
+ for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
+ chip_data = &stm32_host_data->chips_data[i];
+ raw_spin_lock(&chip_data->rlock);
+ stm32_chip_suspend(chip_data, chip_data->wake_active);
+ raw_spin_unlock(&chip_data->rlock);
+ }
+
+ return 0;
+}
+
+static void __maybe_unused stm32_exti_h_resume(void)
+{
+ struct stm32_exti_chip_data *chip_data;
+ int i;
+
+ for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
+ chip_data = &stm32_host_data->chips_data[i];
+ raw_spin_lock(&chip_data->rlock);
+ stm32_chip_resume(chip_data, chip_data->mask_cache);
+ raw_spin_unlock(&chip_data->rlock);
+ }
+}
+
+static struct syscore_ops stm32_exti_h_syscore_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = stm32_exti_h_suspend,
+ .resume = stm32_exti_h_resume,
+#endif
+};
+
+static void stm32_exti_h_syscore_init(struct stm32_exti_host_data *host_data)
+{
+ stm32_host_data = host_data;
+ register_syscore_ops(&stm32_exti_h_syscore_ops);
+}
+
+static void stm32_exti_h_syscore_deinit(void)
+{
+ unregister_syscore_ops(&stm32_exti_h_syscore_ops);
+}
+
+static int stm32_exti_h_retrigger(struct irq_data *d)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(mask, base + stm32_bank->swier_ofst);
+
+ return 0;
+}
+
+static struct irq_chip stm32_exti_h_chip = {
+ .name = "stm32-exti-h",
+ .irq_eoi = stm32_exti_h_eoi,
+ .irq_mask = stm32_exti_h_mask,
+ .irq_unmask = stm32_exti_h_unmask,
+ .irq_retrigger = stm32_exti_h_retrigger,
+ .irq_set_type = stm32_exti_h_set_type,
+ .irq_set_wake = stm32_exti_h_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32_exti_h_set_affinity : NULL,
+};
+
+static struct irq_chip stm32_exti_h_chip_direct = {
+ .name = "stm32-exti-h-direct",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = stm32_exti_h_mask,
+ .irq_unmask = stm32_exti_h_unmask,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_wake = stm32_exti_h_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? irq_chip_set_affinity_parent : NULL,
+};
+
+static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct stm32_exti_host_data *host_data = dm->host_data;
+ struct stm32_exti_chip_data *chip_data;
+ u8 desc_irq;
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec p_fwspec;
+ irq_hw_number_t hwirq;
+ int bank;
+ u32 event_trg;
+ struct irq_chip *chip;
+
+ hwirq = fwspec->param[0];
+ if (hwirq >= host_data->drv_data->bank_nr * IRQS_PER_BANK)
+ return -EINVAL;
+
+ bank = hwirq / IRQS_PER_BANK;
+ chip_data = &host_data->chips_data[bank];
+
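+ /*
+ * Events flagged in the TRG register have their trigger handled
+ * locally by the EXTI, so use the full chip; other events are
+ * direct ones and most operations are forwarded to the parent chip.
+ */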
+ event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst);
+ chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ?
+ &stm32_exti_h_chip : &stm32_exti_h_chip_direct;
+
+ irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
+
+ if (!host_data->drv_data->desc_irqs)
+ return -EINVAL;
+
+ desc_irq = host_data->drv_data->desc_irqs[hwirq];
+ if (desc_irq != EXTI_INVALID_IRQ) {
+ p_fwspec.fwnode = dm->parent->fwnode;
+ p_fwspec.param_count = 3;
+ p_fwspec.param[0] = GIC_SPI;
+ p_fwspec.param[1] = desc_irq;
+ p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
+ }
+
+ return 0;
+}
+
+static struct
+stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
+ struct device_node *node)
+{
+ struct stm32_exti_host_data *host_data;
+
+ host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+ if (!host_data)
+ return NULL;
+
+ host_data->drv_data = dd;
+ host_data->chips_data = kcalloc(dd->bank_nr,
+ sizeof(struct stm32_exti_chip_data),
+ GFP_KERNEL);
+ if (!host_data->chips_data)
+ goto free_host_data;
+
+ host_data->base = of_iomap(node, 0);
+ if (!host_data->base) {
+ pr_err("%pOF: Unable to map registers\n", node);
+ goto free_chips_data;
+ }
+
+ stm32_host_data = host_data;
+
+ return host_data;
+
+free_chips_data:
+ kfree(host_data->chips_data);
+free_host_data:
+ kfree(host_data);
+
+ return NULL;
+}
+
+static struct
+stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
+ u32 bank_idx,
+ struct device_node *node)
+{
+ const struct stm32_exti_bank *stm32_bank;
+ struct stm32_exti_chip_data *chip_data;
+ void __iomem *base = h_data->base;
+
+ stm32_bank = h_data->drv_data->exti_banks[bank_idx];
+ chip_data = &h_data->chips_data[bank_idx];
+ chip_data->host_data = h_data;
+ chip_data->reg_bank = stm32_bank;
+
+ raw_spin_lock_init(&chip_data->rlock);
+
+ /*
+ * This IP has no reset, so after hot reboot we should
+ * clear registers to avoid residue
+ */
+ writel_relaxed(0, base + stm32_bank->imr_ofst);
+ if (stm32_bank->emr_ofst != UNDEF_REG)
+ writel_relaxed(0, base + stm32_bank->emr_ofst);
+
+ pr_info("%pOF: bank%d\n", node, bank_idx);
+
+ return chip_data;
+}
+
+static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
+ struct device_node *node)
+{
+ struct stm32_exti_host_data *host_data;
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ int nr_irqs, ret, i;
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+
+ host_data = stm32_exti_host_init(drv_data, node);
+ if (!host_data)
+ return -ENOMEM;
+
+ domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
+ &irq_exti_domain_ops, NULL);
+ if (!domain) {
+ pr_err("%pOFn: Could not register interrupt domain.\n",
+ node);
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti",
+ handle_edge_irq, clr, 0, 0);
+ if (ret) {
+ pr_err("%pOF: Could not allocate generic interrupt chip.\n",
+ node);
+ goto out_free_domain;
+ }
+
+ for (i = 0; i < drv_data->bank_nr; i++) {
+ const struct stm32_exti_bank *stm32_bank;
+ struct stm32_exti_chip_data *chip_data;
+
+ stm32_bank = drv_data->exti_banks[i];
+ chip_data = stm32_exti_chip_init(host_data, i, node);
+
+ gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);
+
+ gc->reg_base = host_data->base;
+ gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types->chip.irq_ack = stm32_irq_ack;
+ gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
+ gc->chip_types->chip.irq_set_wake = irq_gc_set_wake;
+ gc->suspend = stm32_irq_suspend;
+ gc->resume = stm32_irq_resume;
+ gc->wake_enabled = IRQ_MSK(IRQS_PER_BANK);
+
+ gc->chip_types->regs.mask = stm32_bank->imr_ofst;
+ gc->private = (void *)chip_data;
+ }
+
+ nr_irqs = of_irq_count(node);
+ for (i = 0; i < nr_irqs; i++) {
+ unsigned int irq = irq_of_parse_and_map(node, i);
+
+ irq_set_handler_data(irq, domain);
+ irq_set_chained_handler(irq, stm32_irq_handler);
+ }
+
+ return 0;
+
+out_free_domain:
+ irq_domain_remove(domain);
+out_unmap:
+ iounmap(host_data->base);
+ kfree(host_data->chips_data);
+ kfree(host_data);
+ return ret;
+}
+
+static const struct irq_domain_ops stm32_exti_h_domain_ops = {
+ .alloc = stm32_exti_h_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void stm32_exti_remove_irq(void *data)
+{
+ struct irq_domain *domain = data;
+
+ irq_domain_remove(domain);
+}
+
+static int stm32_exti_remove(struct platform_device *pdev)
+{
+ stm32_exti_h_syscore_deinit();
+ return 0;
+}
+
+static int stm32_exti_probe(struct platform_device *pdev)
+{
+ int ret, i;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct irq_domain *parent_domain, *domain;
+ struct stm32_exti_host_data *host_data;
+ const struct stm32_exti_drv_data *drv_data;
+
+ host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
+ if (!host_data)
+ return -ENOMEM;
+
+ /* check for an optional hwspinlock, which may not be available yet */
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret == -EPROBE_DEFER)
+ /* hwspinlock framework not yet ready */
+ return ret;
+
+ if (ret >= 0) {
+ host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
+ if (!host_data->hwlock) {
+ dev_err(dev, "Failed to request hwspinlock\n");
+ return -EINVAL;
+ }
+ } else if (ret != -ENOENT) {
+ /* note: ENOENT is a valid case (means 'no hwspinlock') */
+ dev_err(dev, "Failed to get hwspinlock\n");
+ return ret;
+ }
+
+ /* initialize host_data */
+ drv_data = of_device_get_match_data(dev);
+ if (!drv_data) {
+ dev_err(dev, "no of match data\n");
+ return -ENODEV;
+ }
+ host_data->drv_data = drv_data;
+
+ host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr,
+ sizeof(*host_data->chips_data),
+ GFP_KERNEL);
+ if (!host_data->chips_data)
+ return -ENOMEM;
+
+ host_data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host_data->base))
+ return PTR_ERR(host_data->base);
+
+ for (i = 0; i < drv_data->bank_nr; i++)
+ stm32_exti_chip_init(host_data, i, np);
+
+ parent_domain = irq_find_host(of_irq_find_parent(np));
+ if (!parent_domain) {
+ dev_err(dev, "GIC interrupt-parent not found\n");
+ return -EINVAL;
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0,
+ drv_data->bank_nr * IRQS_PER_BANK,
+ np, &stm32_exti_h_domain_ops,
+ host_data);
+
+ if (!domain) {
+ dev_err(dev, "Could not register exti domain\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_add_action_or_reset(dev, stm32_exti_remove_irq, domain);
+ if (ret)
+ return ret;
+
+ stm32_exti_h_syscore_init(host_data);
+
+ return 0;
+}
+
+/* platform driver only for MP1 */
+static const struct of_device_id stm32_exti_ids[] = {
+ { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
+ { .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_exti_ids);
+
+static struct platform_driver stm32_exti_driver = {
+ .probe = stm32_exti_probe,
+ .remove = stm32_exti_remove,
+ .driver = {
+ .name = "stm32_exti",
+ .of_match_table = stm32_exti_ids,
+ },
+};
+
+static int __init stm32_exti_arch_init(void)
+{
+ return platform_driver_register(&stm32_exti_driver);
+}
+
+static void __exit stm32_exti_arch_exit(void)
+{
+ platform_driver_unregister(&stm32_exti_driver);
+}
+
+arch_initcall(stm32_exti_arch_init);
+module_exit(stm32_exti_arch_exit);
+
+/* no platform driver for F4 and H7 */
+static int __init stm32f4_exti_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return stm32_exti_init(&stm32f4xx_drv_data, np);
+}
+
+IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init);
+
+static int __init stm32h7_exti_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return stm32_exti_init(&stm32h7xx_drv_data, np);
+}
+
+IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
new file mode 100644
index 000000000..dd506ebfd
--- /dev/null
+++ b/drivers/irqchip/irq-sun4i.c
@@ -0,0 +1,202 @@
+/*
+ * Allwinner A1X SoCs IRQ chip driver.
+ *
+ * Copyright (C) 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ * Benn Huang <benn@allwinnertech.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+
+#define SUN4I_IRQ_VECTOR_REG 0x00
+#define SUN4I_IRQ_PROTECTION_REG 0x08
+#define SUN4I_IRQ_NMI_CTRL_REG 0x0c
+#define SUN4I_IRQ_PENDING_REG(x) (0x10 + 0x4 * x)
+#define SUN4I_IRQ_FIQ_PENDING_REG(x) (0x20 + 0x4 * x)
+#define SUN4I_IRQ_ENABLE_REG(data, x) ((data)->enable_reg_offset + 0x4 * x)
+#define SUN4I_IRQ_MASK_REG(data, x) ((data)->mask_reg_offset + 0x4 * x)
+#define SUN4I_IRQ_ENABLE_REG_OFFSET 0x40
+#define SUN4I_IRQ_MASK_REG_OFFSET 0x50
+#define SUNIV_IRQ_ENABLE_REG_OFFSET 0x20
+#define SUNIV_IRQ_MASK_REG_OFFSET 0x30
+
+struct sun4i_irq_chip_data {
+ void __iomem *irq_base;
+ struct irq_domain *irq_domain;
+ u32 enable_reg_offset;
+ u32 mask_reg_offset;
+};
+
+static struct sun4i_irq_chip_data *irq_ic_data;
+
+static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
+
+static void sun4i_irq_ack(struct irq_data *irqd)
+{
+ unsigned int irq = irqd_to_hwirq(irqd);
+
+ if (irq != 0)
+ return; /* Only IRQ 0 / the ENMI needs to be acked */
+
+ writel(BIT(0), irq_ic_data->irq_base + SUN4I_IRQ_PENDING_REG(0));
+}
+
+static void sun4i_irq_mask(struct irq_data *irqd)
+{
+ unsigned int irq = irqd_to_hwirq(irqd);
+ unsigned int irq_off = irq % 32;
+ int reg = irq / 32;
+ u32 val;
+
+ val = readl(irq_ic_data->irq_base +
+ SUN4I_IRQ_ENABLE_REG(irq_ic_data, reg));
+ writel(val & ~(1 << irq_off),
+ irq_ic_data->irq_base + SUN4I_IRQ_ENABLE_REG(irq_ic_data, reg));
+}
+
+static void sun4i_irq_unmask(struct irq_data *irqd)
+{
+ unsigned int irq = irqd_to_hwirq(irqd);
+ unsigned int irq_off = irq % 32;
+ int reg = irq / 32;
+ u32 val;
+
+ val = readl(irq_ic_data->irq_base +
+ SUN4I_IRQ_ENABLE_REG(irq_ic_data, reg));
+ writel(val | (1 << irq_off),
+ irq_ic_data->irq_base + SUN4I_IRQ_ENABLE_REG(irq_ic_data, reg));
+}
+
+static struct irq_chip sun4i_irq_chip = {
+ .name = "sun4i_irq",
+ .irq_eoi = sun4i_irq_ack,
+ .irq_mask = sun4i_irq_mask,
+ .irq_unmask = sun4i_irq_unmask,
+ .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
+};
+
+static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops sun4i_irq_ops = {
+ .map = sun4i_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init sun4i_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ irq_ic_data->irq_base = of_iomap(node, 0);
+ if (!irq_ic_data->irq_base)
+ panic("%pOF: unable to map IC registers\n",
+ node);
+
+ /* Disable all interrupts */
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_ENABLE_REG(irq_ic_data, 0));
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_ENABLE_REG(irq_ic_data, 1));
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_ENABLE_REG(irq_ic_data, 2));
+
+ /* Unmask all the interrupts, ENABLE_REG(x) is used for masking */
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_MASK_REG(irq_ic_data, 0));
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_MASK_REG(irq_ic_data, 1));
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_MASK_REG(irq_ic_data, 2));
+
+ /* Clear all the pending interrupts */
+ writel(0xffffffff, irq_ic_data->irq_base + SUN4I_IRQ_PENDING_REG(0));
+ writel(0xffffffff, irq_ic_data->irq_base + SUN4I_IRQ_PENDING_REG(1));
+ writel(0xffffffff, irq_ic_data->irq_base + SUN4I_IRQ_PENDING_REG(2));
+
+ /* Enable protection mode */
+ writel(0x01, irq_ic_data->irq_base + SUN4I_IRQ_PROTECTION_REG);
+
+ /* Configure the external interrupt source type */
+ writel(0x00, irq_ic_data->irq_base + SUN4I_IRQ_NMI_CTRL_REG);
+
+ irq_ic_data->irq_domain = irq_domain_add_linear(node, 3 * 32,
+ &sun4i_irq_ops, NULL);
+ if (!irq_ic_data->irq_domain)
+ panic("%pOF: unable to create IRQ domain\n", node);
+
+ set_handle_irq(sun4i_handle_irq);
+
+ return 0;
+}
+
+static int __init sun4i_ic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ irq_ic_data = kzalloc(sizeof(struct sun4i_irq_chip_data), GFP_KERNEL);
+ if (!irq_ic_data)
+ return -ENOMEM;
+
+ irq_ic_data->enable_reg_offset = SUN4I_IRQ_ENABLE_REG_OFFSET;
+ irq_ic_data->mask_reg_offset = SUN4I_IRQ_MASK_REG_OFFSET;
+
+ return sun4i_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-a10-ic", sun4i_ic_of_init);
+
+static int __init suniv_ic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ irq_ic_data = kzalloc(sizeof(struct sun4i_irq_chip_data), GFP_KERNEL);
+ if (!irq_ic_data)
+ return -ENOMEM;
+
+ irq_ic_data->enable_reg_offset = SUNIV_IRQ_ENABLE_REG_OFFSET;
+ irq_ic_data->mask_reg_offset = SUNIV_IRQ_MASK_REG_OFFSET;
+
+ return sun4i_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(allwinner_sunvi_ic, "allwinner,suniv-f1c100s-ic",
+ suniv_ic_of_init);
+
+static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
+{
+ u32 hwirq;
+
+ /*
+ * hwirq == 0 can mean one of 3 things:
+ * 1) no more irqs pending
+ * 2) irq 0 pending
+ * 3) spurious irq
+ * So if we immediately get a reading of 0, check the irq-pending reg
+ * to differentiate between 2 and 3. We only do this once to avoid
+ * the extra check in the common case of 1 happening after having
+ * read the vector-reg once.
+ */
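+ /*
+ * The vector register reports the active interrupt as an offset into
+ * a table of 4-byte entries, so shift right by two to get the hwirq.
+ */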
+ hwirq = readl(irq_ic_data->irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
+ if (hwirq == 0 &&
+ !(readl(irq_ic_data->irq_base + SUN4I_IRQ_PENDING_REG(0)) &
+ BIT(0)))
+ return;
+
+ do {
+ generic_handle_domain_irq(irq_ic_data->irq_domain, hwirq);
+ hwirq = readl(irq_ic_data->irq_base +
+ SUN4I_IRQ_VECTOR_REG) >> 2;
+ } while (hwirq != 0);
+}
diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c
new file mode 100644
index 000000000..a01e44049
--- /dev/null
+++ b/drivers/irqchip/irq-sun6i-r.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The R_INTC in Allwinner A31 and newer SoCs manages several types of
+ * interrupts, as shown below:
+ *
+ * NMI IRQ DIRECT IRQs MUXED IRQs
+ * bit 0 bits 1-15^ bits 19-31
+ *
+ * +---------+ +---------+ +---------+ +---------+
+ * | NMI Pad | | IRQ d | | IRQ m | | IRQ m+7 |
+ * +---------+ +---------+ +---------+ +---------+
+ * | | | | | | |
+ * | | | | |......| |
+ * +------V------+ +------------+ | | | +--V------V--+ |
+ * | Invert/ | | Write 1 to | | | | | AND with | |
+ * | Edge Detect | | PENDING[0] | | | | | MUX[m/8] | |
+ * +-------------+ +------------+ | | | +------------+ |
+ * | | | | | | |
+ * +--V-------V--+ +--V--+ | +--V--+ | +--V--+
+ * | Set Reset| | GIC | | | GIC | | | GIC |
+ * | Latch | | SPI | | | SPI |... | ...| SPI |
+ * +-------------+ | N+d | | | m | | | m+7 |
+ * | | +-----+ | +-----+ | +-----+
+ * | | | |
+ * +-------V-+ +-V----------+ +---------V--+ +--------V--------+
+ * | GIC SPI | | AND with | | AND with | | AND with |
+ * | N (=32) | | ENABLE[0] | | ENABLE[d] | | ENABLE[19+m/8] |
+ * +---------+ +------------+ +------------+ +-----------------+
+ * | | |
+ * +------V-----+ +------V-----+ +--------V--------+
+ * | Read | | Read | | Read |
+ * | PENDING[0] | | PENDING[d] | | PENDING[19+m/8] |
+ * +------------+ +------------+ +-----------------+
+ *
+ * ^ bits 16-18 are direct IRQs for peripherals with banked interrupts, such as
+ * the MSGBOX. These IRQs do not map to any GIC SPI.
+ *
+ * The H6 variant adds two more (banked) direct IRQs and implements the full
+ * set of 128 mux bits. This requires a second set of top-level registers.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define SUN6I_NMI_CTRL (0x0c)
+#define SUN6I_IRQ_PENDING(n) (0x10 + 4 * (n))
+#define SUN6I_IRQ_ENABLE(n) (0x40 + 4 * (n))
+#define SUN6I_MUX_ENABLE(n) (0xc0 + 4 * (n))
+
+#define SUN6I_NMI_SRC_TYPE_LEVEL_LOW 0
+#define SUN6I_NMI_SRC_TYPE_EDGE_FALLING 1
+#define SUN6I_NMI_SRC_TYPE_LEVEL_HIGH 2
+#define SUN6I_NMI_SRC_TYPE_EDGE_RISING 3
+
+#define SUN6I_NMI_BIT BIT(0)
+
+#define SUN6I_NMI_NEEDS_ACK ((void *)1)
+
+#define SUN6I_NR_TOP_LEVEL_IRQS 64
+#define SUN6I_NR_DIRECT_IRQS 16
+#define SUN6I_NR_MUX_BITS 128
+
+struct sun6i_r_intc_variant {
+ u32 first_mux_irq;
+ u32 nr_mux_irqs;
+ u32 mux_valid[BITS_TO_U32(SUN6I_NR_MUX_BITS)];
+};
+
+static void __iomem *base;
+static irq_hw_number_t nmi_hwirq;
+static DECLARE_BITMAP(wake_irq_enabled, SUN6I_NR_TOP_LEVEL_IRQS);
+static DECLARE_BITMAP(wake_mux_enabled, SUN6I_NR_MUX_BITS);
+static DECLARE_BITMAP(wake_mux_valid, SUN6I_NR_MUX_BITS);
+
+static void sun6i_r_intc_ack_nmi(void)
+{
+ writel_relaxed(SUN6I_NMI_BIT, base + SUN6I_IRQ_PENDING(0));
+}
+
+static void sun6i_r_intc_nmi_ack(struct irq_data *data)
+{
+ if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
+ sun6i_r_intc_ack_nmi();
+ else
+ data->chip_data = SUN6I_NMI_NEEDS_ACK;
+}
+
+static void sun6i_r_intc_nmi_eoi(struct irq_data *data)
+{
+ /* For oneshot IRQs, delay the ack until the IRQ is unmasked. */
+ if (data->chip_data == SUN6I_NMI_NEEDS_ACK && !irqd_irq_masked(data)) {
+ data->chip_data = NULL;
+ sun6i_r_intc_ack_nmi();
+ }
+
+ irq_chip_eoi_parent(data);
+}
+
+static void sun6i_r_intc_nmi_unmask(struct irq_data *data)
+{
+ if (data->chip_data == SUN6I_NMI_NEEDS_ACK) {
+ data->chip_data = NULL;
+ sun6i_r_intc_ack_nmi();
+ }
+
+ irq_chip_unmask_parent(data);
+}
+
+static int sun6i_r_intc_nmi_set_type(struct irq_data *data, unsigned int type)
+{
+ u32 nmi_src_type;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ nmi_src_type = SUN6I_NMI_SRC_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ nmi_src_type = SUN6I_NMI_SRC_TYPE_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ nmi_src_type = SUN6I_NMI_SRC_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ nmi_src_type = SUN6I_NMI_SRC_TYPE_LEVEL_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(nmi_src_type, base + SUN6I_NMI_CTRL);
+
+ /*
+ * The "External NMI" GIC input connects to a latch inside R_INTC, not
+ * directly to the pin. So the GIC trigger type does not depend on the
+ * NMI pin trigger type.
+ */
+ return irq_chip_set_type_parent(data, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static int sun6i_r_intc_nmi_set_irqchip_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ if (which == IRQCHIP_STATE_PENDING && !state)
+ sun6i_r_intc_ack_nmi();
+
+ return irq_chip_set_parent_state(data, which, state);
+}
+
+static int sun6i_r_intc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ unsigned long offset_from_nmi = data->hwirq - nmi_hwirq;
+
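+ /*
+ * Direct IRQs are the GIC SPIs immediately following the NMI, so
+ * their offset from the NMI hwirq is also their bit position in the
+ * top-level enable register; everything else is looked up in the
+ * mux wake bitmap.
+ */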
+ if (offset_from_nmi < SUN6I_NR_DIRECT_IRQS)
+ assign_bit(offset_from_nmi, wake_irq_enabled, on);
+ else if (test_bit(data->hwirq, wake_mux_valid))
+ assign_bit(data->hwirq, wake_mux_enabled, on);
+ else
+ /* Not wakeup capable. */
+ return -EPERM;
+
+ return 0;
+}
+
+static struct irq_chip sun6i_r_intc_nmi_chip = {
+ .name = "sun6i-r-intc",
+ .irq_ack = sun6i_r_intc_nmi_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = sun6i_r_intc_nmi_unmask,
+ .irq_eoi = sun6i_r_intc_nmi_eoi,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = sun6i_r_intc_nmi_set_type,
+ .irq_set_irqchip_state = sun6i_r_intc_nmi_set_irqchip_state,
+ .irq_set_wake = sun6i_r_intc_irq_set_wake,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static struct irq_chip sun6i_r_intc_wakeup_chip = {
+ .name = "sun6i-r-intc",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_wake = sun6i_r_intc_irq_set_wake,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int sun6i_r_intc_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ /* Accept the old two-cell binding for the NMI only. */
+ if (fwspec->param_count == 2 && fwspec->param[0] == 0) {
+ *hwirq = nmi_hwirq;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+
+ /* Otherwise this binding should match the GIC SPI binding. */
+ if (fwspec->param_count < 3)
+ return -EINVAL;
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static int sun6i_r_intc_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_fwspec *fwspec = arg;
+ struct irq_fwspec gic_fwspec;
+ unsigned long hwirq;
+ unsigned int type;
+ int i, ret;
+
+ ret = sun6i_r_intc_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+ if (hwirq + nr_irqs > SUN6I_NR_MUX_BITS)
+ return -EINVAL;
+
+ /* Construct a GIC-compatible fwspec from this fwspec. */
+ gic_fwspec = (struct irq_fwspec) {
+ .fwnode = domain->parent->fwnode,
+ .param_count = 3,
+ .param = { GIC_SPI, hwirq, type },
+ };
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; ++i, ++hwirq, ++virq) {
+ if (hwirq == nmi_hwirq) {
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &sun6i_r_intc_nmi_chip,
+ NULL);
+ irq_set_handler(virq, handle_fasteoi_ack_irq);
+ } else {
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &sun6i_r_intc_wakeup_chip,
+ NULL);
+ }
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops sun6i_r_intc_domain_ops = {
+ .translate = sun6i_r_intc_domain_translate,
+ .alloc = sun6i_r_intc_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int sun6i_r_intc_suspend(void)
+{
+ u32 buf[BITS_TO_U32(max(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
+ int i;
+
+ /* Wake IRQs are enabled during system sleep and shutdown. */
+ bitmap_to_arr32(buf, wake_irq_enabled, SUN6I_NR_TOP_LEVEL_IRQS);
+ for (i = 0; i < BITS_TO_U32(SUN6I_NR_TOP_LEVEL_IRQS); ++i)
+ writel_relaxed(buf[i], base + SUN6I_IRQ_ENABLE(i));
+ bitmap_to_arr32(buf, wake_mux_enabled, SUN6I_NR_MUX_BITS);
+ for (i = 0; i < BITS_TO_U32(SUN6I_NR_MUX_BITS); ++i)
+ writel_relaxed(buf[i], base + SUN6I_MUX_ENABLE(i));
+
+ return 0;
+}
+
+static void sun6i_r_intc_resume(void)
+{
+ int i;
+
+ /* Only the NMI is relevant during normal operation. */
+ writel_relaxed(SUN6I_NMI_BIT, base + SUN6I_IRQ_ENABLE(0));
+ for (i = 1; i < BITS_TO_U32(SUN6I_NR_TOP_LEVEL_IRQS); ++i)
+ writel_relaxed(0, base + SUN6I_IRQ_ENABLE(i));
+}
+
+static void sun6i_r_intc_shutdown(void)
+{
+ sun6i_r_intc_suspend();
+}
+
+static struct syscore_ops sun6i_r_intc_syscore_ops = {
+ .suspend = sun6i_r_intc_suspend,
+ .resume = sun6i_r_intc_resume,
+ .shutdown = sun6i_r_intc_shutdown,
+};
+
+static int __init sun6i_r_intc_init(struct device_node *node,
+ struct device_node *parent,
+ const struct sun6i_r_intc_variant *v)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct of_phandle_args nmi_parent;
+ int ret;
+
+ /* Extract the NMI hwirq number from the OF node. */
+ ret = of_irq_parse_one(node, 0, &nmi_parent);
+ if (ret)
+ return ret;
+ if (nmi_parent.args_count < 3 ||
+ nmi_parent.args[0] != GIC_SPI ||
+ nmi_parent.args[2] != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ nmi_hwirq = nmi_parent.args[1];
+
+ bitmap_set(wake_irq_enabled, v->first_mux_irq, v->nr_mux_irqs);
+ bitmap_from_arr32(wake_mux_valid, v->mux_valid, SUN6I_NR_MUX_BITS);
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: Failed to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ base = of_io_request_and_map(node, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("%pOF: Failed to map MMIO region\n", node);
+ return PTR_ERR(base);
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, 0, node,
+ &sun6i_r_intc_domain_ops, NULL);
+ if (!domain) {
+ pr_err("%pOF: Failed to allocate domain\n", node);
+ iounmap(base);
+ return -ENOMEM;
+ }
+
+ register_syscore_ops(&sun6i_r_intc_syscore_ops);
+
+ sun6i_r_intc_ack_nmi();
+ sun6i_r_intc_resume();
+
+ return 0;
+}
+
+static const struct sun6i_r_intc_variant sun6i_a31_r_intc_variant __initconst = {
+ .first_mux_irq = 19,
+ .nr_mux_irqs = 13,
+ .mux_valid = { 0xffffffff, 0xfff80000, 0xffffffff, 0x0000000f },
+};
+
+static int __init sun6i_a31_r_intc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sun6i_r_intc_init(node, parent, &sun6i_a31_r_intc_variant);
+}
+IRQCHIP_DECLARE(sun6i_a31_r_intc, "allwinner,sun6i-a31-r-intc", sun6i_a31_r_intc_init);
+
+static const struct sun6i_r_intc_variant sun50i_h6_r_intc_variant __initconst = {
+ .first_mux_irq = 21,
+ .nr_mux_irqs = 16,
+ .mux_valid = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff },
+};
+
+static int __init sun50i_h6_r_intc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sun6i_r_intc_init(node, parent, &sun50i_h6_r_intc_variant);
+}
+IRQCHIP_DECLARE(sun50i_h6_r_intc, "allwinner,sun50i-h6-r-intc", sun50i_h6_r_intc_init);
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
new file mode 100644
index 000000000..21d49791f
--- /dev/null
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -0,0 +1,241 @@
+/*
+ * Allwinner A20/A31 SoCs NMI IRQ chip driver.
+ *
+ * Carlo Caione <carlo.caione@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define DRV_NAME "sunxi-nmi"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define SUNXI_NMI_SRC_TYPE_MASK 0x00000003
+
+#define SUNXI_NMI_IRQ_BIT BIT(0)
+
+/*
+ * For deprecated sun6i-a31-sc-nmi compatible.
+ */
+#define SUN6I_NMI_CTRL 0x00
+#define SUN6I_NMI_PENDING 0x04
+#define SUN6I_NMI_ENABLE 0x34
+
+#define SUN7I_NMI_CTRL 0x00
+#define SUN7I_NMI_PENDING 0x04
+#define SUN7I_NMI_ENABLE 0x08
+
+#define SUN9I_NMI_CTRL 0x00
+#define SUN9I_NMI_ENABLE 0x04
+#define SUN9I_NMI_PENDING 0x08
+
+enum {
+ SUNXI_SRC_TYPE_LEVEL_LOW = 0,
+ SUNXI_SRC_TYPE_EDGE_FALLING,
+ SUNXI_SRC_TYPE_LEVEL_HIGH,
+ SUNXI_SRC_TYPE_EDGE_RISING,
+};
+
+struct sunxi_sc_nmi_reg_offs {
+ u32 ctrl;
+ u32 pend;
+ u32 enable;
+};
+
+static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
+ .ctrl = SUN6I_NMI_CTRL,
+ .pend = SUN6I_NMI_PENDING,
+ .enable = SUN6I_NMI_ENABLE,
+};
+
+static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
+ .ctrl = SUN7I_NMI_CTRL,
+ .pend = SUN7I_NMI_PENDING,
+ .enable = SUN7I_NMI_ENABLE,
+};
+
+static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
+ .ctrl = SUN9I_NMI_CTRL,
+ .pend = SUN9I_NMI_PENDING,
+ .enable = SUN9I_NMI_ENABLE,
+};
+
+static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
+ u32 val)
+{
+ irq_reg_writel(gc, val, off);
+}
+
+static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
+{
+ return irq_reg_readl(gc, off);
+}
+
+static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+ generic_handle_domain_irq(domain, 0);
+ chained_irq_exit(chip, desc);
+}
+
+static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct irq_chip_type *ct = gc->chip_types;
+ u32 src_type_reg;
+ u32 ctrl_off = ct->regs.type;
+ unsigned int src_type;
+ unsigned int i;
+
+ irq_gc_lock(gc);
+
+ switch (flow_type & IRQF_TRIGGER_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ src_type = SUNXI_SRC_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_NONE:
+ case IRQ_TYPE_LEVEL_LOW:
+ src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
+ break;
+ default:
+ irq_gc_unlock(gc);
+ pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
+ data->irq);
+ return -EBADR;
+ }
+
+ irqd_set_trigger_type(data, flow_type);
+ irq_setup_alt_chip(data, flow_type);
+
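+ /*
+ * irq_setup_alt_chip() may have switched chip types; pick up the
+ * type register offset of the chip type matching the new trigger.
+ */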
+ for (i = 0; i < gc->num_ct; i++, ct++)
+ if (ct->type & flow_type)
+ ctrl_off = ct->regs.type;
+
+ src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
+ src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
+ src_type_reg |= src_type;
+ sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
+
+ irq_gc_unlock(gc);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
+ const struct sunxi_sc_nmi_reg_offs *reg_offs)
+{
+ struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ unsigned int irq;
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ int ret;
+
+ domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("Could not register interrupt domain.\n");
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
+ handle_fasteoi_irq, clr, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("Could not allocate generic interrupt chip.\n");
+ goto fail_irqd_remove;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ pr_err("unable to parse irq\n");
+ ret = -EINVAL;
+ goto fail_irqd_remove;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(gc->reg_base)) {
+ pr_err("unable to map resource\n");
+ ret = PTR_ERR(gc->reg_base);
+ goto fail_irqd_remove;
+ }
+
+ gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
+ gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+ gc->chip_types[0].regs.ack = reg_offs->pend;
+ gc->chip_types[0].regs.mask = reg_offs->enable;
+ gc->chip_types[0].regs.type = reg_offs->ctrl;
+
+ gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
+ gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type;
+ gc->chip_types[1].regs.ack = reg_offs->pend;
+ gc->chip_types[1].regs.mask = reg_offs->enable;
+ gc->chip_types[1].regs.type = reg_offs->ctrl;
+ gc->chip_types[1].handler = handle_edge_irq;
+
+ /* Disable any active interrupts */
+ sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
+
+ /* Clear any pending NMI interrupts */
+ sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);
+
+ irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
+
+ return 0;
+
+fail_irqd_remove:
+ irq_domain_remove(domain);
+
+ return ret;
+}
+
+static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
+}
+IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
+
+static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
+}
+IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
+
+static int __init sun9i_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
+}
+IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
new file mode 100644
index 000000000..8a0e69298
--- /dev/null
+++ b/drivers/irqchip/irq-tb10x.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Abilis Systems interrupt controller driver
+ *
+ * Copyright (C) Abilis Systems 2012
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#define AB_IRQCTL_INT_ENABLE 0x00
+#define AB_IRQCTL_INT_STATUS 0x04
+#define AB_IRQCTL_SRC_MODE 0x08
+#define AB_IRQCTL_SRC_POLARITY 0x0C
+#define AB_IRQCTL_INT_MODE 0x10
+#define AB_IRQCTL_INT_POLARITY 0x14
+#define AB_IRQCTL_INT_FORCE 0x18
+
+#define AB_IRQCTL_MAXIRQ 32
+
+static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg,
+ u32 val)
+{
+ irq_reg_writel(gc, val, reg);
+}
+
+static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg)
+{
+ return irq_reg_readl(gc, reg);
+}
+
+static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ uint32_t im, mod, pol;
+
+ im = data->mask;
+
+ irq_gc_lock(gc);
+
+ mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im;
+ pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im;
+
+ switch (flow_type & IRQF_TRIGGER_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ pol ^= im;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ mod ^= im;
+ break;
+ case IRQ_TYPE_NONE:
+ flow_type = IRQ_TYPE_LEVEL_LOW;
+ fallthrough;
+ case IRQ_TYPE_LEVEL_LOW:
+ mod ^= im;
+ pol ^= im;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ break;
+ default:
+ irq_gc_unlock(gc);
+ pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
+ __func__, data->irq);
+ return -EBADR;
+ }
+
+ irqd_set_trigger_type(data, flow_type);
+ irq_setup_alt_chip(data, flow_type);
+
+ ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod);
+ ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol);
+ ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im);
+
+ irq_gc_unlock(gc);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static void tb10x_irq_cascade(struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
+
+ generic_handle_domain_irq(domain, irq);
+}
+
+static int __init of_tb10x_init_irq(struct device_node *ictl,
+ struct device_node *parent)
+{
+ int i, ret, nrirqs = of_irq_count(ictl);
+ struct resource mem;
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+ void __iomem *reg_base;
+
+ if (of_address_to_resource(ictl, 0, &mem)) {
+ pr_err("%pOFn: No registers declared in DeviceTree.\n",
+ ictl);
+ return -EINVAL;
+ }
+
+ if (!request_mem_region(mem.start, resource_size(&mem),
+ ictl->full_name)) {
+ pr_err("%pOFn: Request mem region failed.\n", ictl);
+ return -EBUSY;
+ }
+
+ reg_base = ioremap(mem.start, resource_size(&mem));
+ if (!reg_base) {
+ ret = -EBUSY;
+ pr_err("%pOFn: ioremap failed.\n", ictl);
+ goto ioremap_fail;
+ }
+
+ domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ ret = -ENOMEM;
+ pr_err("%pOFn: Could not register interrupt domain.\n",
+ ictl);
+ goto irq_domain_add_fail;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ,
+ 2, ictl->name, handle_level_irq,
+ IRQ_NOREQUEST, IRQ_NOPROBE,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%pOFn: Could not allocate generic interrupt chip.\n",
+ ictl);
+ goto gc_alloc_fail;
+ }
+
+ gc = domain->gc->gc[0];
+ gc->reg_base = reg_base;
+
+ gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_set_type = tb10x_irq_set_type;
+ gc->chip_types[0].regs.mask = AB_IRQCTL_INT_ENABLE;
+
+ gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
+ gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[1].chip.irq_set_type = tb10x_irq_set_type;
+ gc->chip_types[1].regs.ack = AB_IRQCTL_INT_STATUS;
+ gc->chip_types[1].regs.mask = AB_IRQCTL_INT_ENABLE;
+ gc->chip_types[1].handler = handle_edge_irq;
+
+ for (i = 0; i < nrirqs; i++) {
+ unsigned int irq = irq_of_parse_and_map(ictl, i);
+
+ irq_set_chained_handler_and_data(irq, tb10x_irq_cascade,
+ domain);
+ }
+
+ ab_irqctl_writereg(gc, AB_IRQCTL_INT_ENABLE, 0);
+ ab_irqctl_writereg(gc, AB_IRQCTL_INT_MODE, 0);
+ ab_irqctl_writereg(gc, AB_IRQCTL_INT_POLARITY, 0);
+ ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, ~0UL);
+
+ return 0;
+
+gc_alloc_fail:
+ irq_domain_remove(domain);
+irq_domain_add_fail:
+ iounmap(reg_base);
+ioremap_fail:
+ release_mem_region(mem.start, resource_size(&mem));
+ return ret;
+}
+IRQCHIP_DECLARE(tb10x_intc, "abilis,tb10x-ictl", of_tb10x_init_irq);
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
new file mode 100644
index 000000000..ad3e2c1b3
--- /dev/null
+++ b/drivers/irqchip/irq-tegra.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver code for Tegra's Legacy Interrupt Controller
+ *
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Heavily based on the original arch/arm/mach-tegra/irq.c code:
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * Copyright (C) 2010,2013, NVIDIA Corporation
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define ICTLR_CPU_IEP_VFIQ 0x08
+#define ICTLR_CPU_IEP_FIR 0x14
+#define ICTLR_CPU_IEP_FIR_SET 0x18
+#define ICTLR_CPU_IEP_FIR_CLR 0x1c
+
+#define ICTLR_CPU_IER 0x20
+#define ICTLR_CPU_IER_SET 0x24
+#define ICTLR_CPU_IER_CLR 0x28
+#define ICTLR_CPU_IEP_CLASS 0x2C
+
+#define ICTLR_COP_IER 0x30
+#define ICTLR_COP_IER_SET 0x34
+#define ICTLR_COP_IER_CLR 0x38
+#define ICTLR_COP_IEP_CLASS 0x3c
+
+#define TEGRA_MAX_NUM_ICTLRS 6
+
+static unsigned int num_ictlrs;
+
+struct tegra_ictlr_soc {
+ unsigned int num_ictlrs;
+};
+
+static const struct tegra_ictlr_soc tegra20_ictlr_soc = {
+ .num_ictlrs = 4,
+};
+
+static const struct tegra_ictlr_soc tegra30_ictlr_soc = {
+ .num_ictlrs = 5,
+};
+
+static const struct tegra_ictlr_soc tegra210_ictlr_soc = {
+ .num_ictlrs = 6,
+};
+
+static const struct of_device_id ictlr_matches[] = {
+ { .compatible = "nvidia,tegra210-ictlr", .data = &tegra210_ictlr_soc },
+ { .compatible = "nvidia,tegra30-ictlr", .data = &tegra30_ictlr_soc },
+ { .compatible = "nvidia,tegra20-ictlr", .data = &tegra20_ictlr_soc },
+ { }
+};
+
+struct tegra_ictlr_info {
+ void __iomem *base[TEGRA_MAX_NUM_ICTLRS];
+#ifdef CONFIG_PM_SLEEP
+ u32 cop_ier[TEGRA_MAX_NUM_ICTLRS];
+ u32 cop_iep[TEGRA_MAX_NUM_ICTLRS];
+ u32 cpu_ier[TEGRA_MAX_NUM_ICTLRS];
+ u32 cpu_iep[TEGRA_MAX_NUM_ICTLRS];
+
+ u32 ictlr_wake_mask[TEGRA_MAX_NUM_ICTLRS];
+#endif
+};
+
+static struct tegra_ictlr_info *lic;
+
+static inline void tegra_ictlr_write_mask(struct irq_data *d, unsigned long reg)
+{
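+ /* chip_data holds the per-controller base stashed at allocation time */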
+ void __iomem *base = (void __iomem __force *)d->chip_data;
+ u32 mask;
+
+ mask = BIT(d->hwirq % 32);
+ writel_relaxed(mask, base + reg);
+}
+
+static void tegra_mask(struct irq_data *d)
+{
+ tegra_ictlr_write_mask(d, ICTLR_CPU_IER_CLR);
+ irq_chip_mask_parent(d);
+}
+
+static void tegra_unmask(struct irq_data *d)
+{
+ tegra_ictlr_write_mask(d, ICTLR_CPU_IER_SET);
+ irq_chip_unmask_parent(d);
+}
+
+static void tegra_eoi(struct irq_data *d)
+{
+ tegra_ictlr_write_mask(d, ICTLR_CPU_IEP_FIR_CLR);
+ irq_chip_eoi_parent(d);
+}
+
+static int tegra_retrigger(struct irq_data *d)
+{
+ tegra_ictlr_write_mask(d, ICTLR_CPU_IEP_FIR_SET);
+ return irq_chip_retrigger_hierarchy(d);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_set_wake(struct irq_data *d, unsigned int enable)
+{
+ u32 irq = d->hwirq;
+ u32 index, mask;
+
+ index = (irq / 32);
+ mask = BIT(irq % 32);
+ if (enable)
+ lic->ictlr_wake_mask[index] |= mask;
+ else
+ lic->ictlr_wake_mask[index] &= ~mask;
+
+ /*
+ * Do *not* call into the parent, as the GIC doesn't have any
+ * wake-up facility...
+ */
+ return 0;
+}
+
+static int tegra_ictlr_suspend(void)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < num_ictlrs; i++) {
+ void __iomem *ictlr = lic->base[i];
+
+ /* Save interrupt state */
+ lic->cpu_ier[i] = readl_relaxed(ictlr + ICTLR_CPU_IER);
+ lic->cpu_iep[i] = readl_relaxed(ictlr + ICTLR_CPU_IEP_CLASS);
+ lic->cop_ier[i] = readl_relaxed(ictlr + ICTLR_COP_IER);
+ lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
+
+ /* Disable COP interrupts */
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
+
+ /* Disable CPU interrupts */
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
+
+ /* Enable the wakeup sources of ictlr */
+ writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
+ }
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static void tegra_ictlr_resume(void)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < num_ictlrs; i++) {
+ void __iomem *ictlr = lic->base[i];
+
+ writel_relaxed(lic->cpu_iep[i],
+ ictlr + ICTLR_CPU_IEP_CLASS);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(lic->cpu_ier[i],
+ ictlr + ICTLR_CPU_IER_SET);
+ writel_relaxed(lic->cop_iep[i],
+ ictlr + ICTLR_COP_IEP_CLASS);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(lic->cop_ier[i],
+ ictlr + ICTLR_COP_IER_SET);
+ }
+ local_irq_restore(flags);
+}
+
+static struct syscore_ops tegra_ictlr_syscore_ops = {
+ .suspend = tegra_ictlr_suspend,
+ .resume = tegra_ictlr_resume,
+};
+
+static void tegra_ictlr_syscore_init(void)
+{
+ register_syscore_ops(&tegra_ictlr_syscore_ops);
+}
+#else
+#define tegra_set_wake NULL
+static inline void tegra_ictlr_syscore_init(void) {}
+#endif
+
+static struct irq_chip tegra_ictlr_chip = {
+ .name = "LIC",
+ .irq_eoi = tegra_eoi,
+ .irq_mask = tegra_mask,
+ .irq_unmask = tegra_unmask,
+ .irq_retrigger = tegra_retrigger,
+ .irq_set_wake = tegra_set_wake,
+ .irq_set_type = irq_chip_set_type_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int tegra_ictlr_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (fwspec->param[0] != 0)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct tegra_ictlr_info *info = domain->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int i;
+
+ if (fwspec->param_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ hwirq = fwspec->param[1];
+ if (hwirq >= (num_ictlrs * 32))
+ return -EINVAL;
+
+ for (i = 0; i < nr_irqs; i++) {
+ int ictlr = (hwirq + i) / 32;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &tegra_ictlr_chip,
+ (void __force *)info->base[ictlr]);
+ }
+
+ parent_fwspec = *fwspec;
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static const struct irq_domain_ops tegra_ictlr_domain_ops = {
+ .translate = tegra_ictlr_domain_translate,
+ .alloc = tegra_ictlr_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init tegra_ictlr_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ const struct of_device_id *match;
+ const struct tegra_ictlr_soc *soc;
+ unsigned int i;
+ int err;
+
+ if (!parent) {
+ pr_err("%pOF: no parent, giving up\n", node);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ match = of_match_node(ictlr_matches, node);
+ if (!match) /* Should never happen... */
+ return -ENODEV;
+
+ soc = match->data;
+
+ lic = kzalloc(sizeof(*lic), GFP_KERNEL);
+ if (!lic)
+ return -ENOMEM;
+
+ for (i = 0; i < TEGRA_MAX_NUM_ICTLRS; i++) {
+ void __iomem *base;
+
+ base = of_iomap(node, i);
+ if (!base)
+ break;
+
+ lic->base[i] = base;
+
+ /* Disable all interrupts */
+ writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR);
+ /* All interrupts target IRQ */
+ writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);
+
+ num_ictlrs++;
+ }
+
+ if (!num_ictlrs) {
+ pr_err("%pOF: no valid regions, giving up\n", node);
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ WARN(num_ictlrs != soc->num_ictlrs,
+ "%pOF: Found %u interrupt controllers in DT; expected %u.\n",
+ node, num_ictlrs, soc->num_ictlrs);
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
+ node, &tegra_ictlr_domain_ops,
+ lic);
+ if (!domain) {
+ pr_err("%pOF: failed to allocated domain\n", node);
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ tegra_ictlr_syscore_init();
+
+ pr_info("%pOF: %d interrupts forwarded to %pOF\n",
+ node, num_ictlrs * 32, parent);
+
+ return 0;
+
+out_unmap:
+ for (i = 0; i < num_ictlrs; i++)
+ iounmap(lic->base[i]);
+out_free:
+ kfree(lic);
+ return err;
+}
+
+IRQCHIP_DECLARE(tegra20_ictlr, "nvidia,tegra20-ictlr", tegra_ictlr_init);
+IRQCHIP_DECLARE(tegra30_ictlr, "nvidia,tegra30-ictlr", tegra_ictlr_init);
+IRQCHIP_DECLARE(tegra210_ictlr, "nvidia,tegra210-ictlr", tegra_ictlr_init);
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
new file mode 100644
index 000000000..5fdbb4358
--- /dev/null
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' K3 Interrupt Aggregator irqchip driver
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
+ * Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <asm-generic/msi.h>
+
+#define TI_SCI_DEV_ID_MASK 0xffff
+#define TI_SCI_DEV_ID_SHIFT 16
+#define TI_SCI_IRQ_ID_MASK 0xffff
+#define TI_SCI_IRQ_ID_SHIFT 0
+#define HWIRQ_TO_DEVID(hwirq) (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
+ (TI_SCI_DEV_ID_MASK))
+#define HWIRQ_TO_IRQID(hwirq) ((hwirq) & (TI_SCI_IRQ_ID_MASK))
+#define TO_HWIRQ(dev, index) ((((dev) & TI_SCI_DEV_ID_MASK) << \
+ TI_SCI_DEV_ID_SHIFT) | \
+ ((index) & TI_SCI_IRQ_ID_MASK))
+
+#define MAX_EVENTS_PER_VINT 64
+#define VINT_ENABLE_SET_OFFSET 0x0
+#define VINT_ENABLE_CLR_OFFSET 0x8
+#define VINT_STATUS_OFFSET 0x18
+#define VINT_STATUS_MASKED_OFFSET 0x20
+
+/**
+ * struct ti_sci_inta_event_desc - Description of an event coming to
+ * Interrupt Aggregator. This serves
+ * as a mapping table for global event,
+ * hwirq and vint bit.
+ * @global_event: Global event number corresponding to this event
+ * @hwirq: Hwirq of the incoming interrupt
+ * @vint_bit: Corresponding vint bit to which this event is attached.
+ */
+struct ti_sci_inta_event_desc {
+ u16 global_event;
+ u32 hwirq;
+ u8 vint_bit;
+};
+
+/**
+ * struct ti_sci_inta_vint_desc - Description of a virtual interrupt coming out
+ * of Interrupt Aggregator.
+ * @domain: Pointer to IRQ domain to which this vint belongs.
+ * @list: List entry for the vint list
+ * @event_map: Bitmap to manage the allocation of events to vint.
+ * @events: Array of event descriptors assigned to this vint.
+ * @parent_virq: Linux IRQ number that gets attached to parent
+ * @vint_id: TISCI vint ID
+ */
+struct ti_sci_inta_vint_desc {
+ struct irq_domain *domain;
+ struct list_head list;
+ DECLARE_BITMAP(event_map, MAX_EVENTS_PER_VINT);
+ struct ti_sci_inta_event_desc events[MAX_EVENTS_PER_VINT];
+ unsigned int parent_virq;
+ u16 vint_id;
+};
+
+/**
+ * struct ti_sci_inta_irq_domain - Structure representing a TISCI based
+ * Interrupt Aggregator IRQ domain.
+ * @sci: Pointer to TISCI handle
+ * @vint: TISCI resource pointer representing IA interrupts.
+ * @global_event: TISCI resource pointer representing global events.
+ * @vint_list: List of the vints active in the system
+ * @vint_mutex: Mutex to protect vint_list
+ * @base: Base address of the memory mapped IO registers
+ * @pdev: Pointer to platform device.
+ * @ti_sci_id: TI-SCI device identifier
+ * @unmapped_cnt: Number of @unmapped_dev_ids entries
+ * @unmapped_dev_ids: Pointer to an array of TI-SCI device identifiers of
+ * unmapped event sources.
+ * Unmapped Events are not part of the Global Event Map and
+ * they are converted to Global event within INTA to be
+ * received by the same INTA to generate an interrupt.
+ * In case an interrupt request comes for a device which is
+ * generating Unmapped Event, we must use the INTA's TI-SCI
+ * device identifier in place of the source device
+ * identifier to let sysfw know where it has to program the
+ * Global Event number.
+ */
+struct ti_sci_inta_irq_domain {
+ const struct ti_sci_handle *sci;
+ struct ti_sci_resource *vint;
+ struct ti_sci_resource *global_event;
+ struct list_head vint_list;
+ /* Mutex to protect vint list */
+ struct mutex vint_mutex;
+ void __iomem *base;
+ struct platform_device *pdev;
+ u32 ti_sci_id;
+
+ int unmapped_cnt;
+ u16 *unmapped_dev_ids;
+};
+
+#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
+ events[i])
+
+static u16 ti_sci_inta_get_dev_id(struct ti_sci_inta_irq_domain *inta, u32 hwirq)
+{
+ u16 dev_id = HWIRQ_TO_DEVID(hwirq);
+ int i;
+
+ if (inta->unmapped_cnt == 0)
+ return dev_id;
+
+ /*
+ * For devices sending Unmapped Events we must use the INTA's TI-SCI
+ * device identifier number to be able to convert it to a Global Event
+ * and map it to an interrupt.
+ */
+ for (i = 0; i < inta->unmapped_cnt; i++) {
+ if (dev_id == inta->unmapped_dev_ids[i]) {
+ dev_id = inta->ti_sci_id;
+ break;
+ }
+ }
+
+ return dev_id;
+}
+
+/**
+ * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs
+ * @desc: Pointer to irq_desc corresponding to the irq
+ */
+static void ti_sci_inta_irq_handler(struct irq_desc *desc)
+{
+ struct ti_sci_inta_vint_desc *vint_desc;
+ struct ti_sci_inta_irq_domain *inta;
+ struct irq_domain *domain;
+ unsigned int bit;
+ unsigned long val;
+
+ vint_desc = irq_desc_get_handler_data(desc);
+ domain = vint_desc->domain;
+ inta = domain->host_data;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
+ VINT_STATUS_MASKED_OFFSET);
+
+ for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT)
+ generic_handle_domain_irq(domain, vint_desc->events[bit].hwirq);
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+/**
+ * ti_sci_inta_xlate_irq() - Translate hwirq to parent's hwirq.
+ * @inta: IRQ domain corresponding to Interrupt Aggregator
+ * @vint_id:	Interrupt Aggregator vint id to be translated
+ *
+ * Return parent irq number if translation is available else -ENOENT.
+ */
+static int ti_sci_inta_xlate_irq(struct ti_sci_inta_irq_domain *inta,
+ u16 vint_id)
+{
+ struct device_node *np = dev_of_node(&inta->pdev->dev);
+ u32 base, parent_base, size;
+ const __be32 *range;
+ int len;
+
+ range = of_get_property(np, "ti,interrupt-ranges", &len);
+ if (!range)
+ return vint_id;
+
+ for (len /= sizeof(*range); len >= 3; len -= 3) {
+ base = be32_to_cpu(*range++);
+ parent_base = be32_to_cpu(*range++);
+ size = be32_to_cpu(*range++);
+
+ if (base <= vint_id && vint_id < base + size)
+ return vint_id - base + parent_base;
+ }
+
+ return -ENOENT;
+}
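+/*
+ * Illustration only (hypothetical values, not from any binding): with
+ * ti,interrupt-ranges = <0 168 8>, vint_id 3 lies in [0, 8) and translates
+ * to parent hwirq 168 + 3 = 171, while a vint_id outside every range
+ * yields -ENOENT.
+ */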
+
+/**
+ * ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt aggregator
+ * @domain: IRQ domain corresponding to Interrupt Aggregator
+ *
+ * Return pointer to the allocated vint_desc if all went well, else a
+ * corresponding error pointer.
+ */
+static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain)
+{
+ struct ti_sci_inta_irq_domain *inta = domain->host_data;
+ struct ti_sci_inta_vint_desc *vint_desc;
+ struct irq_fwspec parent_fwspec;
+ struct device_node *parent_node;
+ unsigned int parent_virq;
+ int p_hwirq, ret;
+ u16 vint_id;
+
+ vint_id = ti_sci_get_free_resource(inta->vint);
+ if (vint_id == TI_SCI_RESOURCE_NULL)
+ return ERR_PTR(-EINVAL);
+
+ p_hwirq = ti_sci_inta_xlate_irq(inta, vint_id);
+ if (p_hwirq < 0) {
+ ret = p_hwirq;
+ goto free_vint;
+ }
+
+ vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL);
+ if (!vint_desc) {
+ ret = -ENOMEM;
+ goto free_vint;
+ }
+
+ vint_desc->domain = domain;
+ vint_desc->vint_id = vint_id;
+ INIT_LIST_HEAD(&vint_desc->list);
+
+ parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev));
+ parent_fwspec.fwnode = of_node_to_fwnode(parent_node);
+
+ if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
+ /* Parent is GIC */
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = p_hwirq - 32;
+ parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+ } else {
+ /* Parent is Interrupt Router */
+ parent_fwspec.param_count = 1;
+ parent_fwspec.param[0] = p_hwirq;
+ }
+
+ parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
+ if (parent_virq == 0) {
+ dev_err(&inta->pdev->dev, "Parent IRQ allocation failed\n");
+ ret = -EINVAL;
+ goto free_vint_desc;
+ }
+ vint_desc->parent_virq = parent_virq;
+
+ list_add_tail(&vint_desc->list, &inta->vint_list);
+ irq_set_chained_handler_and_data(vint_desc->parent_virq,
+ ti_sci_inta_irq_handler, vint_desc);
+
+ return vint_desc;
+free_vint_desc:
+ kfree(vint_desc);
+free_vint:
+ ti_sci_release_resource(inta->vint, vint_id);
+ return ERR_PTR(ret);
+}
+
+/**
+ * ti_sci_inta_alloc_event() - Attach an event to an IA vint.
+ * @vint_desc: Pointer to vint_desc to which the event gets attached
+ * @free_bit: Bit inside vint to which event gets attached
+ * @hwirq: hwirq of the input event
+ *
+ * Return event_desc pointer if all went ok, else an appropriate error pointer.
+ */
+static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta_vint_desc *vint_desc,
+ u16 free_bit,
+ u32 hwirq)
+{
+ struct ti_sci_inta_irq_domain *inta = vint_desc->domain->host_data;
+ struct ti_sci_inta_event_desc *event_desc;
+ u16 dev_id, dev_index;
+ int err;
+
+ dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
+ dev_index = HWIRQ_TO_IRQID(hwirq);
+
+ event_desc = &vint_desc->events[free_bit];
+ event_desc->hwirq = hwirq;
+ event_desc->vint_bit = free_bit;
+ event_desc->global_event = ti_sci_get_free_resource(inta->global_event);
+ if (event_desc->global_event == TI_SCI_RESOURCE_NULL)
+ return ERR_PTR(-EINVAL);
+
+ err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci,
+ dev_id, dev_index,
+ inta->ti_sci_id,
+ vint_desc->vint_id,
+ event_desc->global_event,
+ free_bit);
+ if (err)
+ goto free_global_event;
+
+ return event_desc;
+free_global_event:
+ ti_sci_release_resource(inta->global_event, event_desc->global_event);
+ return ERR_PTR(err);
+}
+
+/**
+ * ti_sci_inta_alloc_irq() - Allocate an irq within INTA domain
+ * @domain: irq_domain pointer corresponding to INTA
+ * @hwirq: hwirq of the input event
+ *
+ * Note: Allocation happens in the following manner:
+ * - Find a free bit available in any of the vints available in the list.
+ * - If not found, allocate a vint from the vint pool
+ * - Attach the free bit to input hwirq.
+ * Return event_desc pointer if all went ok, else an appropriate error pointer.
+ */
+static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *domain,
+ u32 hwirq)
+{
+ struct ti_sci_inta_irq_domain *inta = domain->host_data;
+ struct ti_sci_inta_vint_desc *vint_desc = NULL;
+ struct ti_sci_inta_event_desc *event_desc;
+ u16 free_bit;
+
+ mutex_lock(&inta->vint_mutex);
+ list_for_each_entry(vint_desc, &inta->vint_list, list) {
+ free_bit = find_first_zero_bit(vint_desc->event_map,
+ MAX_EVENTS_PER_VINT);
+ if (free_bit != MAX_EVENTS_PER_VINT) {
+ set_bit(free_bit, vint_desc->event_map);
+ goto alloc_event;
+ }
+ }
+
+ /* No free bits available. Allocate a new vint */
+ vint_desc = ti_sci_inta_alloc_parent_irq(domain);
+ if (IS_ERR(vint_desc)) {
+ event_desc = ERR_CAST(vint_desc);
+ goto unlock;
+ }
+
+ free_bit = find_first_zero_bit(vint_desc->event_map,
+ MAX_EVENTS_PER_VINT);
+ set_bit(free_bit, vint_desc->event_map);
+
+alloc_event:
+ event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq);
+ if (IS_ERR(event_desc))
+ clear_bit(free_bit, vint_desc->event_map);
+
+unlock:
+ mutex_unlock(&inta->vint_mutex);
+ return event_desc;
+}
+
+/**
+ * ti_sci_inta_free_parent_irq() - Free a parent irq to INTA
+ * @inta: Pointer to inta domain.
+ * @vint_desc: Pointer to vint_desc that needs to be freed.
+ */
+static void ti_sci_inta_free_parent_irq(struct ti_sci_inta_irq_domain *inta,
+ struct ti_sci_inta_vint_desc *vint_desc)
+{
+ if (find_first_bit(vint_desc->event_map, MAX_EVENTS_PER_VINT) == MAX_EVENTS_PER_VINT) {
+ list_del(&vint_desc->list);
+ ti_sci_release_resource(inta->vint, vint_desc->vint_id);
+ irq_dispose_mapping(vint_desc->parent_virq);
+ kfree(vint_desc);
+ }
+}
+
+/**
+ * ti_sci_inta_free_irq() - Free an IRQ within INTA domain
+ * @event_desc: Pointer to event_desc that needs to be freed.
+ * @hwirq: Hwirq number within INTA domain that needs to be freed
+ */
+static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
+ u32 hwirq)
+{
+ struct ti_sci_inta_vint_desc *vint_desc;
+ struct ti_sci_inta_irq_domain *inta;
+ u16 dev_id;
+
+ vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
+ inta = vint_desc->domain->host_data;
+ dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
+ /* free event irq */
+ mutex_lock(&inta->vint_mutex);
+ inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
+ dev_id, HWIRQ_TO_IRQID(hwirq),
+ inta->ti_sci_id,
+ vint_desc->vint_id,
+ event_desc->global_event,
+ event_desc->vint_bit);
+
+ clear_bit(event_desc->vint_bit, vint_desc->event_map);
+ ti_sci_release_resource(inta->global_event, event_desc->global_event);
+ event_desc->global_event = TI_SCI_RESOURCE_NULL;
+ event_desc->hwirq = 0;
+
+ ti_sci_inta_free_parent_irq(inta, vint_desc);
+ mutex_unlock(&inta->vint_mutex);
+}
+
+/**
+ * ti_sci_inta_request_resources() - Allocate resources for input irq
+ * @data: Pointer to corresponding irq_data
+ *
+ * Note: This is the core API where the actual allocation happens for the
+ * input hwirq, and it may involve creating a parent irq for the vint.
+ * Doing that in irq_domain_ops.alloc() would deadlock, so the allocation
+ * is done here in irq_request_resources() instead.
+ *
+ * Return: 0 if all went well else corresponding error.
+ */
+static int ti_sci_inta_request_resources(struct irq_data *data)
+{
+ struct ti_sci_inta_event_desc *event_desc;
+
+ event_desc = ti_sci_inta_alloc_irq(data->domain, data->hwirq);
+ if (IS_ERR(event_desc))
+ return PTR_ERR(event_desc);
+
+ data->chip_data = event_desc;
+
+ return 0;
+}
+
+/**
+ * ti_sci_inta_release_resources() - Release resources for input irq
+ * @data: Pointer to corresponding irq_data
+ *
+ * Note: Corresponding to request_resources(), all the unmapping and deletion
+ * of parent vint irqs happens in this api.
+ */
+static void ti_sci_inta_release_resources(struct irq_data *data)
+{
+ struct ti_sci_inta_event_desc *event_desc;
+
+ event_desc = irq_data_get_irq_chip_data(data);
+ ti_sci_inta_free_irq(event_desc, data->hwirq);
+}
+
+/**
+ * ti_sci_inta_manage_event() - Control the event based on the offset
+ * @data: Pointer to corresponding irq_data
+ * @offset:	Register offset used to control the event.
+ */
+static void ti_sci_inta_manage_event(struct irq_data *data, u32 offset)
+{
+ struct ti_sci_inta_event_desc *event_desc;
+ struct ti_sci_inta_vint_desc *vint_desc;
+ struct ti_sci_inta_irq_domain *inta;
+
+ event_desc = irq_data_get_irq_chip_data(data);
+ vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
+ inta = data->domain->host_data;
+
+ writeq_relaxed(BIT(event_desc->vint_bit),
+ inta->base + vint_desc->vint_id * 0x1000 + offset);
+}
+
+/**
+ * ti_sci_inta_mask_irq() - Mask an event
+ * @data: Pointer to corresponding irq_data
+ */
+static void ti_sci_inta_mask_irq(struct irq_data *data)
+{
+ ti_sci_inta_manage_event(data, VINT_ENABLE_CLR_OFFSET);
+}
+
+/**
+ * ti_sci_inta_unmask_irq() - Unmask an event
+ * @data: Pointer to corresponding irq_data
+ */
+static void ti_sci_inta_unmask_irq(struct irq_data *data)
+{
+ ti_sci_inta_manage_event(data, VINT_ENABLE_SET_OFFSET);
+}
+
+/**
+ * ti_sci_inta_ack_irq() - Ack an event
+ * @data: Pointer to corresponding irq_data
+ */
+static void ti_sci_inta_ack_irq(struct irq_data *data)
+{
+ /*
+ * Do not clear the event if hardware is capable of sending
+ * a down event.
+ */
+ if (irqd_get_trigger_type(data) != IRQF_TRIGGER_HIGH)
+ ti_sci_inta_manage_event(data, VINT_STATUS_OFFSET);
+}
+
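+/*
+ * All events multiplexed onto a vint share that vint's single parent
+ * interrupt, so a per-event CPU affinity cannot be honoured here.
+ */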
+static int ti_sci_inta_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ return -EINVAL;
+}
+
+/**
+ * ti_sci_inta_set_type() - Update the trigger type of the irq.
+ * @data: Pointer to corresponding irq_data
+ * @type: Trigger type as specified by user
+ *
+ * Note: This updates the handle_irq callback for level msi.
+ *
+ * Return 0 if all went well else appropriate error.
+ */
+static int ti_sci_inta_set_type(struct irq_data *data, unsigned int type)
+{
+ /*
+	 * .alloc defaults to handle_edge_irq. If the user specifies that the
+	 * IRQ is a level MSI, switch the handler to handle_level_irq.
+ */
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQF_TRIGGER_HIGH:
+ irq_set_handler_locked(data, handle_level_irq);
+ return 0;
+ case IRQF_TRIGGER_RISING:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip ti_sci_inta_irq_chip = {
+ .name = "INTA",
+ .irq_ack = ti_sci_inta_ack_irq,
+ .irq_mask = ti_sci_inta_mask_irq,
+ .irq_set_type = ti_sci_inta_set_type,
+ .irq_unmask = ti_sci_inta_unmask_irq,
+ .irq_set_affinity = ti_sci_inta_set_affinity,
+ .irq_request_resources = ti_sci_inta_request_resources,
+ .irq_release_resources = ti_sci_inta_release_resources,
+};
+
+/**
+ * ti_sci_inta_irq_domain_free() - Free an IRQ from the IRQ domain
+ * @domain: Domain to which the irqs belong
+ * @virq: base linux virtual IRQ to be freed.
+ * @nr_irqs: Number of continuous irqs to be freed
+ */
+static void ti_sci_inta_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+
+ irq_domain_reset_irq_data(data);
+}
+
+/**
+ * ti_sci_inta_irq_domain_alloc() - Allocate Interrupt aggregator IRQs
+ * @domain:	Pointer to the interrupt aggregator IRQ domain
+ * @virq: Corresponding Linux virtual IRQ number
+ * @nr_irqs: Continuous irqs to be allocated
+ * @data: Pointer to firmware specifier
+ *
+ * No actual allocation happens here.
+ *
+ * Return 0 if all went well else appropriate error value.
+ */
+static int ti_sci_inta_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *data)
+{
+ msi_alloc_info_t *arg = data;
+
+ irq_domain_set_info(domain, virq, arg->hwirq, &ti_sci_inta_irq_chip,
+ NULL, handle_edge_irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ti_sci_inta_irq_domain_ops = {
+ .free = ti_sci_inta_irq_domain_free,
+ .alloc = ti_sci_inta_irq_domain_alloc,
+};
+
+static struct irq_chip ti_sci_inta_msi_irq_chip = {
+ .name = "MSI-INTA",
+ .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
+};
+
+static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+{
+ struct platform_device *pdev = to_platform_device(desc->dev);
+
+ arg->desc = desc;
+ arg->hwirq = TO_HWIRQ(pdev->id, desc->msi_index);
+}
+
+static struct msi_domain_ops ti_sci_inta_msi_ops = {
+ .set_desc = ti_sci_inta_msi_set_desc,
+};
+
+static struct msi_domain_info ti_sci_inta_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_LEVEL_CAPABLE),
+ .ops = &ti_sci_inta_msi_ops,
+ .chip = &ti_sci_inta_msi_irq_chip,
+};
+
+static int ti_sci_inta_get_unmapped_sources(struct ti_sci_inta_irq_domain *inta)
+{
+ struct device *dev = &inta->pdev->dev;
+ struct device_node *node = dev_of_node(dev);
+ struct of_phandle_iterator it;
+ int count, err, ret, i;
+
+ count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL);
+ if (count <= 0)
+ return 0;
+
+ inta->unmapped_dev_ids = devm_kcalloc(dev, count,
+ sizeof(*inta->unmapped_dev_ids),
+ GFP_KERNEL);
+ if (!inta->unmapped_dev_ids)
+ return -ENOMEM;
+
+ i = 0;
+ of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) {
+ u32 dev_id;
+
+ ret = of_property_read_u32(it.node, "ti,sci-dev-id", &dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read failure for %pOFf\n", it.node);
+ of_node_put(it.node);
+ return ret;
+ }
+ inta->unmapped_dev_ids[i++] = dev_id;
+ }
+
+ inta->unmapped_cnt = count;
+
+ return 0;
+}
+
+static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
+{
+ struct irq_domain *parent_domain, *domain, *msi_domain;
+ struct device_node *parent_node, *node;
+ struct ti_sci_inta_irq_domain *inta;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ node = dev_of_node(dev);
+ parent_node = of_irq_find_parent(node);
+ if (!parent_node) {
+ dev_err(dev, "Failed to get IRQ parent node\n");
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent_node);
+ if (!parent_domain)
+ return -EPROBE_DEFER;
+
+ inta = devm_kzalloc(dev, sizeof(*inta), GFP_KERNEL);
+ if (!inta)
+ return -ENOMEM;
+
+ inta->pdev = pdev;
+ inta->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(inta->sci))
+ return dev_err_probe(dev, PTR_ERR(inta->sci),
+ "ti,sci read fail\n");
+
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &inta->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ return -EINVAL;
+ }
+
+ inta->vint = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
+ TI_SCI_RESASG_SUBTYPE_IA_VINT);
+ if (IS_ERR(inta->vint)) {
+ dev_err(dev, "VINT resource allocation failed\n");
+ return PTR_ERR(inta->vint);
+ }
+
+ inta->global_event = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
+ TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT);
+ if (IS_ERR(inta->global_event)) {
+ dev_err(dev, "Global event resource allocation failed\n");
+ return PTR_ERR(inta->global_event);
+ }
+
+ inta->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(inta->base))
+ return PTR_ERR(inta->base);
+
+ ret = ti_sci_inta_get_unmapped_sources(inta);
+ if (ret)
+ return ret;
+
+ domain = irq_domain_add_linear(dev_of_node(dev),
+ ti_sci_get_num_resources(inta->vint),
+ &ti_sci_inta_irq_domain_ops, inta);
+ if (!domain) {
+ dev_err(dev, "Failed to allocate IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node),
+ &ti_sci_inta_msi_domain_info,
+ domain);
+ if (!msi_domain) {
+ irq_domain_remove(domain);
+ dev_err(dev, "Failed to allocate msi domain\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&inta->vint_list);
+ mutex_init(&inta->vint_mutex);
+
+ dev_info(dev, "Interrupt Aggregator domain %d created\n", inta->ti_sci_id);
+
+ return 0;
+}
+
+static const struct of_device_id ti_sci_inta_irq_domain_of_match[] = {
+ { .compatible = "ti,sci-inta", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_inta_irq_domain_of_match);
+
+static struct platform_driver ti_sci_inta_irq_domain_driver = {
+ .probe = ti_sci_inta_irq_domain_probe,
+ .driver = {
+ .name = "ti-sci-inta",
+ .of_match_table = ti_sci_inta_irq_domain_of_match,
+ },
+};
+module_platform_driver(ti_sci_inta_irq_domain_driver);
+
+MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
+MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
new file mode 100644
index 000000000..020ddf29e
--- /dev/null
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' K3 Interrupt Router irqchip driver
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
+ * Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/**
+ * struct ti_sci_intr_irq_domain - Structure representing a TISCI based
+ * Interrupt Router IRQ domain.
+ * @sci: Pointer to TISCI handle
+ * @out_irqs: TISCI resource pointer representing INTR irqs.
+ * @dev: Struct device pointer.
+ * @ti_sci_id: TI-SCI device identifier
+ * @type: Specifies the trigger type supported by this Interrupt Router
+ */
+struct ti_sci_intr_irq_domain {
+ const struct ti_sci_handle *sci;
+ struct ti_sci_resource *out_irqs;
+ struct device *dev;
+ u32 ti_sci_id;
+ u32 type;
+};
+
+static struct irq_chip ti_sci_intr_irq_chip = {
+ .name = "INTR",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+/**
+ * ti_sci_intr_irq_domain_translate() - Retrieve hwirq and type from
+ *					 the IRQ firmware specifier (fwspec).
+ * @domain: Pointer to IRQ domain
+ * @fwspec: Pointer to IRQ specific firmware structure
+ * @hwirq: IRQ number identified by hardware
+ * @type: IRQ type
+ *
+ * Return 0 if all went ok else appropriate error.
+ */
+static int ti_sci_intr_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct ti_sci_intr_irq_domain *intr = domain->host_data;
+
+ if (fwspec->param_count != 1)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = intr->type;
+
+ return 0;
+}
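+/*
+ * Consumers use a single-cell specifier which is already the Interrupt
+ * Router input index; the trigger type comes from the router's
+ * ti,intr-trigger-type property rather than from the specifier.
+ * Hypothetical example: interrupts = <131> selects input 131.
+ */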
+
+/**
+ * ti_sci_intr_xlate_irq() - Translate hwirq to parent's hwirq.
+ * @intr: IRQ domain corresponding to Interrupt Router
+ * @irq: Hardware irq corresponding to the above irq domain
+ *
+ * Return parent irq number if translation is available else -ENOENT.
+ */
+static int ti_sci_intr_xlate_irq(struct ti_sci_intr_irq_domain *intr, u32 irq)
+{
+ struct device_node *np = dev_of_node(intr->dev);
+ u32 base, pbase, size, len;
+ const __be32 *range;
+
+ range = of_get_property(np, "ti,interrupt-ranges", &len);
+ if (!range)
+ return irq;
+
+ for (len /= sizeof(*range); len >= 3; len -= 3) {
+ base = be32_to_cpu(*range++);
+ pbase = be32_to_cpu(*range++);
+ size = be32_to_cpu(*range++);
+
+ if (base <= irq && irq < base + size)
+ return irq - base + pbase;
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * ti_sci_intr_irq_domain_free() - Free the specified IRQs from the domain.
+ * @domain: Domain to which the irqs belong
+ * @virq: Linux virtual IRQ to be freed.
+ * @nr_irqs: Number of continuous irqs to be freed
+ */
+static void ti_sci_intr_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct ti_sci_intr_irq_domain *intr = domain->host_data;
+ struct irq_data *data;
+ int out_irq;
+
+ data = irq_domain_get_irq_data(domain, virq);
+ out_irq = (uintptr_t)data->chip_data;
+
+ intr->sci->ops.rm_irq_ops.free_irq(intr->sci,
+ intr->ti_sci_id, data->hwirq,
+ intr->ti_sci_id, out_irq);
+ ti_sci_release_resource(intr->out_irqs, out_irq);
+ irq_domain_free_irqs_parent(domain, virq, 1);
+ irq_domain_reset_irq_data(data);
+}
+
+/**
+ * ti_sci_intr_alloc_parent_irq() - Allocate parent IRQ
+ * @domain: Pointer to the interrupt router IRQ domain
+ * @virq: Corresponding Linux virtual IRQ number
+ * @hwirq: Corresponding hwirq for the IRQ within this IRQ domain
+ *
+ * Return the INTR output irq if all went well, else an appropriate error value.
+ */
+static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
+ unsigned int virq, u32 hwirq)
+{
+ struct ti_sci_intr_irq_domain *intr = domain->host_data;
+ struct device_node *parent_node;
+ struct irq_fwspec fwspec;
+ int p_hwirq, err = 0;
+ u16 out_irq;
+
+ out_irq = ti_sci_get_free_resource(intr->out_irqs);
+ if (out_irq == TI_SCI_RESOURCE_NULL)
+ return -EINVAL;
+
+ p_hwirq = ti_sci_intr_xlate_irq(intr, out_irq);
+ if (p_hwirq < 0)
+ goto err_irqs;
+
+ parent_node = of_irq_find_parent(dev_of_node(intr->dev));
+ fwspec.fwnode = of_node_to_fwnode(parent_node);
+
+ if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
+ /* Parent is GIC */
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0; /* SPI */
+ fwspec.param[1] = p_hwirq - 32; /* SPI offset */
+ fwspec.param[2] = intr->type;
+ } else {
+ /* Parent is Interrupt Router */
+ fwspec.param_count = 1;
+ fwspec.param[0] = p_hwirq;
+ }
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ goto err_irqs;
+
+ err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci,
+ intr->ti_sci_id, hwirq,
+ intr->ti_sci_id, out_irq);
+ if (err)
+ goto err_msg;
+
+ return out_irq;
+
+err_msg:
+ irq_domain_free_irqs_parent(domain, virq, 1);
+err_irqs:
+ ti_sci_release_resource(intr->out_irqs, out_irq);
+ return err;
+}
+
+/**
+ * ti_sci_intr_irq_domain_alloc() - Allocate Interrupt router IRQs
+ * @domain:	Pointer to the interrupt router IRQ domain
+ * @virq: Corresponding Linux virtual IRQ number
+ * @nr_irqs: Continuous irqs to be allocated
+ * @data: Pointer to firmware specifier
+ *
+ * Return 0 if all went well else appropriate error value.
+ */
+static int ti_sci_intr_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ unsigned long hwirq;
+ unsigned int flags;
+ int err, out_irq;
+
+ err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &flags);
+ if (err)
+ return err;
+
+ out_irq = ti_sci_intr_alloc_parent_irq(domain, virq, hwirq);
+ if (out_irq < 0)
+ return out_irq;
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &ti_sci_intr_irq_chip,
+ (void *)(uintptr_t)out_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ti_sci_intr_irq_domain_ops = {
+ .free = ti_sci_intr_irq_domain_free,
+ .alloc = ti_sci_intr_irq_domain_alloc,
+ .translate = ti_sci_intr_irq_domain_translate,
+};
+
+static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct ti_sci_intr_irq_domain *intr;
+ struct device_node *parent_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ parent_node = of_irq_find_parent(dev_of_node(dev));
+ if (!parent_node) {
+ dev_err(dev, "Failed to get IRQ parent node\n");
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent_node);
+ of_node_put(parent_node);
+ if (!parent_domain) {
+ dev_err(dev, "Failed to find IRQ parent domain\n");
+ return -ENODEV;
+ }
+
+ intr = devm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
+ if (!intr)
+ return -ENOMEM;
+
+ intr->dev = dev;
+ ret = of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type",
+ &intr->type);
+ if (ret) {
+ dev_err(dev, "missing ti,intr-trigger-type property\n");
+ return -EINVAL;
+ }
+
+ intr->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(intr->sci))
+ return dev_err_probe(dev, PTR_ERR(intr->sci),
+ "ti,sci read fail\n");
+
+ ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dev-id",
+ &intr->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ return -EINVAL;
+ }
+
+ intr->out_irqs = devm_ti_sci_get_resource(intr->sci, dev,
+ intr->ti_sci_id,
+ TI_SCI_RESASG_SUBTYPE_IR_OUTPUT);
+ if (IS_ERR(intr->out_irqs)) {
+ dev_err(dev, "Destination irq resource allocation failed\n");
+ return PTR_ERR(intr->out_irqs);
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev),
+ &ti_sci_intr_irq_domain_ops, intr);
+ if (!domain) {
+ dev_err(dev, "Failed to allocate IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ dev_info(dev, "Interrupt Router %d domain created\n", intr->ti_sci_id);
+
+ return 0;
+}
+
+static const struct of_device_id ti_sci_intr_irq_domain_of_match[] = {
+ { .compatible = "ti,sci-intr", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_intr_irq_domain_of_match);
+
+static struct platform_driver ti_sci_intr_irq_domain_driver = {
+ .probe = ti_sci_intr_irq_domain_probe,
+ .driver = {
+ .name = "ti-sci-intr",
+ .of_match_table = ti_sci_intr_irq_domain_of_match,
+ },
+};
+module_platform_driver(ti_sci_intr_irq_domain_driver);
+
+MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
+MODULE_DESCRIPTION("K3 Interrupt Router driver over TI SCI protocol");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
new file mode 100644
index 000000000..b2d61d4f6
--- /dev/null
+++ b/drivers/irqchip/irq-ts4800.c
@@ -0,0 +1,169 @@
+/*
+ * Multiplexed-IRQs driver for TS-4800's FPGA
+ *
+ * Copyright (c) 2015 - Savoir-faire Linux
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+#define IRQ_MASK 0x4
+#define IRQ_STATUS 0x8
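+/*
+ * Both registers are 16 bits wide with one bit per multiplexed source: a
+ * set bit in IRQ_MASK gates the source off, while IRQ_STATUS reports the
+ * sources that are currently pending.
+ */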
+
+struct ts4800_irq_data {
+ void __iomem *base;
+ struct platform_device *pdev;
+ struct irq_domain *domain;
+};
+
+static void ts4800_irq_mask(struct irq_data *d)
+{
+ struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
+ u16 reg = readw(data->base + IRQ_MASK);
+ u16 mask = 1 << d->hwirq;
+
+ writew(reg | mask, data->base + IRQ_MASK);
+}
+
+static void ts4800_irq_unmask(struct irq_data *d)
+{
+ struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
+ u16 reg = readw(data->base + IRQ_MASK);
+ u16 mask = 1 << d->hwirq;
+
+ writew(reg & ~mask, data->base + IRQ_MASK);
+}
+
+static void ts4800_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ seq_printf(p, "%s", dev_name(&data->pdev->dev));
+}
+
+static const struct irq_chip ts4800_chip = {
+ .irq_mask = ts4800_irq_mask,
+ .irq_unmask = ts4800_irq_unmask,
+ .irq_print_chip = ts4800_irq_print_chip,
+};
+
+static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct ts4800_irq_data *data = d->host_data;
+
+ irq_set_chip_and_handler(irq, &ts4800_chip, handle_simple_irq);
+ irq_set_chip_data(irq, data);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ts4800_ic_ops = {
+ .map = ts4800_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void ts4800_ic_chained_handle_irq(struct irq_desc *desc)
+{
+ struct ts4800_irq_data *data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u16 status = readw(data->base + IRQ_STATUS);
+
+ chained_irq_enter(chip, desc);
+
+ if (unlikely(status == 0)) {
+ handle_bad_irq(desc);
+ goto out;
+ }
+
+ do {
+ unsigned int bit = __ffs(status);
+
+ generic_handle_domain_irq(data->domain, bit);
+ status &= ~(1 << bit);
+ } while (status);
+
+out:
+ chained_irq_exit(chip, desc);
+}
+
+static int ts4800_ic_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct ts4800_irq_data *data;
+ int parent_irq;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+ data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ writew(0xFFFF, data->base + IRQ_MASK);
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ dev_err(&pdev->dev, "failed to get parent IRQ\n");
+ return -EINVAL;
+ }
+
+ data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data);
+ if (!data->domain) {
+ dev_err(&pdev->dev, "cannot add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq,
+ ts4800_ic_chained_handle_irq, data);
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+}
+
+static int ts4800_ic_remove(struct platform_device *pdev)
+{
+ struct ts4800_irq_data *data = platform_get_drvdata(pdev);
+
+ irq_domain_remove(data->domain);
+
+ return 0;
+}
+
+static const struct of_device_id ts4800_ic_of_match[] = {
+ { .compatible = "technologic,ts4800-irqc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ts4800_ic_of_match);
+
+static struct platform_driver ts4800_ic_driver = {
+ .probe = ts4800_ic_probe,
+ .remove = ts4800_ic_remove,
+ .driver = {
+ .name = "ts4800-irqc",
+ .of_match_table = ts4800_ic_of_match,
+ },
+};
+module_platform_driver(ts4800_ic_driver);
+
+MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ts4800_irqc");
diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c
new file mode 100644
index 000000000..716b1bb88
--- /dev/null
+++ b/drivers/irqchip/irq-uniphier-aidet.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for UniPhier AIDET (ARM Interrupt Detector)
+ *
+ * Copyright (C) 2017 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define UNIPHIER_AIDET_NR_IRQS 256
+
+#define UNIPHIER_AIDET_DETCONF 0x04 /* inverter register base */
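+/*
+ * DETCONF is an array of 32-bit inverter registers covering all 256
+ * inputs: bit (n % 32) of register (DETCONF + n / 32 * 4) inverts the
+ * polarity of input n, turning falling-edge/low-level lines into the
+ * rising-edge/high-level signals the parent GIC expects.
+ */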
+
+struct uniphier_aidet_priv {
+ struct irq_domain *domain;
+ void __iomem *reg_base;
+ spinlock_t lock;
+ u32 saved_vals[UNIPHIER_AIDET_NR_IRQS / 32];
+};
+
+static void uniphier_aidet_reg_update(struct uniphier_aidet_priv *priv,
+ unsigned int reg, u32 mask, u32 val)
+{
+ unsigned long flags;
+ u32 tmp;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ tmp = readl_relaxed(priv->reg_base + reg);
+ tmp &= ~mask;
+ tmp |= mask & val;
+ writel_relaxed(tmp, priv->reg_base + reg);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void uniphier_aidet_detconf_update(struct uniphier_aidet_priv *priv,
+ unsigned long index, unsigned int val)
+{
+ unsigned int reg;
+ u32 mask;
+
+ reg = UNIPHIER_AIDET_DETCONF + index / 32 * 4;
+ mask = BIT(index % 32);
+
+ uniphier_aidet_reg_update(priv, reg, mask, val ? mask : 0);
+}
+
+static int uniphier_aidet_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct uniphier_aidet_priv *priv = data->chip_data;
+ unsigned int val;
+
+ /* enable inverter for active low triggers */
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ val = 0;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = 1;
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val = 1;
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ uniphier_aidet_detconf_update(priv, data->hwirq, val);
+
+ return irq_chip_set_type_parent(data, type);
+}
+
+static struct irq_chip uniphier_aidet_irq_chip = {
+ .name = "AIDET",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = uniphier_aidet_irq_set_type,
+};
+
+static int uniphier_aidet_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+
+ *out_hwirq = fwspec->param[0];
+ *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static int uniphier_aidet_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ if (nr_irqs != 1)
+ return -EINVAL;
+
+ ret = uniphier_aidet_domain_translate(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (hwirq >= UNIPHIER_AIDET_NR_IRQS)
+ return -ENXIO;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &uniphier_aidet_irq_chip,
+ domain->host_data);
+ if (ret)
+ return ret;
+
+ /* parent is GIC */
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0; /* SPI */
+ parent_fwspec.param[1] = hwirq;
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static const struct irq_domain_ops uniphier_aidet_domain_ops = {
+ .alloc = uniphier_aidet_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+ .translate = uniphier_aidet_domain_translate,
+};
+
+static int uniphier_aidet_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent_np;
+ struct irq_domain *parent_domain;
+ struct uniphier_aidet_priv *priv;
+
+ parent_np = of_irq_find_parent(dev->of_node);
+ if (!parent_np)
+ return -ENXIO;
+
+ parent_domain = irq_find_host(parent_np);
+ of_node_put(parent_np);
+ if (!parent_domain)
+ return -EPROBE_DEFER;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->reg_base))
+ return PTR_ERR(priv->reg_base);
+
+ spin_lock_init(&priv->lock);
+
+ priv->domain = irq_domain_create_hierarchy(
+ parent_domain, 0,
+ UNIPHIER_AIDET_NR_IRQS,
+ of_node_to_fwnode(dev->of_node),
+ &uniphier_aidet_domain_ops, priv);
+ if (!priv->domain)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int __maybe_unused uniphier_aidet_suspend(struct device *dev)
+{
+ struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
+ priv->saved_vals[i] = readl_relaxed(
+ priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
+
+ return 0;
+}
+
+static int __maybe_unused uniphier_aidet_resume(struct device *dev)
+{
+ struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
+ writel_relaxed(priv->saved_vals[i],
+ priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
+
+ return 0;
+}
+
+static const struct dev_pm_ops uniphier_aidet_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(uniphier_aidet_suspend,
+ uniphier_aidet_resume)
+};
+
+static const struct of_device_id uniphier_aidet_match[] = {
+ { .compatible = "socionext,uniphier-ld4-aidet" },
+ { .compatible = "socionext,uniphier-pro4-aidet" },
+ { .compatible = "socionext,uniphier-sld8-aidet" },
+ { .compatible = "socionext,uniphier-pro5-aidet" },
+ { .compatible = "socionext,uniphier-pxs2-aidet" },
+ { .compatible = "socionext,uniphier-ld11-aidet" },
+ { .compatible = "socionext,uniphier-ld20-aidet" },
+ { .compatible = "socionext,uniphier-pxs3-aidet" },
+ { .compatible = "socionext,uniphier-nx1-aidet" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver uniphier_aidet_driver = {
+ .probe = uniphier_aidet_probe,
+ .driver = {
+ .name = "uniphier-aidet",
+ .of_match_table = uniphier_aidet_match,
+ .pm = &uniphier_aidet_pm_ops,
+ },
+};
+builtin_platform_driver(uniphier_aidet_driver);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
new file mode 100644
index 000000000..ba543ed9c
--- /dev/null
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Versatile FPGA-based IRQ controllers
+ */
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/seq_file.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define IRQ_STATUS 0x00
+#define IRQ_RAW_STATUS 0x04
+#define IRQ_ENABLE_SET 0x08
+#define IRQ_ENABLE_CLEAR 0x0c
+#define INT_SOFT_SET 0x10
+#define INT_SOFT_CLEAR 0x14
+#define FIQ_STATUS 0x20
+#define FIQ_RAW_STATUS 0x24
+#define FIQ_ENABLE 0x28
+#define FIQ_ENABLE_SET 0x28
+#define FIQ_ENABLE_CLEAR 0x2C
+
+#define PIC_ENABLES 0x20 /* set interrupt pass through bits */
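+/*
+ * All status/enable registers are 32-bit bitmaps with one bit per source.
+ * PIC_ENABLES is only written for the arm,versatile-sic variant (see
+ * fpga_irq_of_init() at the end of this file).
+ */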
+
+/**
+ * struct fpga_irq_data - irq data container for the FPGA IRQ controller
+ * @base: remapped register base of the controller
+ * @domain: IRQ domain for this instance
+ * @valid: mask for valid IRQs on this controller
+ * @used_irqs: number of active IRQs on this controller
+ */
+struct fpga_irq_data {
+ void __iomem *base;
+ u32 valid;
+ struct irq_domain *domain;
+ u8 used_irqs;
+};
+
+/* we cannot allocate memory when the controllers are initially registered */
+static struct fpga_irq_data fpga_irq_devices[CONFIG_VERSATILE_FPGA_IRQ_NR];
+static int fpga_irq_id;
+
+static void fpga_irq_mask(struct irq_data *d)
+{
+ struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+ u32 mask = 1 << d->hwirq;
+
+ writel(mask, f->base + IRQ_ENABLE_CLEAR);
+}
+
+static void fpga_irq_unmask(struct irq_data *d)
+{
+ struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+ u32 mask = 1 << d->hwirq;
+
+ writel(mask, f->base + IRQ_ENABLE_SET);
+}
+
+static void fpga_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+
+	seq_printf(p, "%s", irq_domain_get_of_node(f->domain)->name);
+}
+
+static const struct irq_chip fpga_chip = {
+ .irq_ack = fpga_irq_mask,
+ .irq_mask = fpga_irq_mask,
+ .irq_unmask = fpga_irq_unmask,
+ .irq_print_chip = fpga_irq_print_chip,
+};
+
+static void fpga_irq_handle(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl(f->base + IRQ_STATUS);
+ if (status == 0) {
+ do_bad_IRQ(desc);
+ goto out;
+ }
+
+ do {
+ unsigned int irq = ffs(status) - 1;
+
+ status &= ~(1 << irq);
+ generic_handle_domain_irq(f->domain, irq);
+ } while (status);
+
+out:
+ chained_irq_exit(chip, desc);
+}
+
+/*
+ * Handle each interrupt in a single FPGA IRQ controller. Returns non-zero
+ * if we've handled at least one interrupt. The status register is re-read
+ * after every handled interrupt, and the lowest pending source is always
+ * serviced first.
+ */
+static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
+{
+ int handled = 0;
+ int irq;
+ u32 status;
+
+ while ((status = readl(f->base + IRQ_STATUS))) {
+ irq = ffs(status) - 1;
+ generic_handle_domain_irq(f->domain, irq);
+ handled = 1;
+ }
+
+ return handled;
+}
+
+/*
+ * Keep iterating over all registered FPGA IRQ controllers until there are
+ * no pending interrupts.
+ */
+static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
+{
+ int i, handled;
+
+ do {
+ for (i = 0, handled = 0; i < fpga_irq_id; ++i)
+ handled |= handle_one_fpga(&fpga_irq_devices[i], regs);
+ } while (handled);
+}
+
+static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct fpga_irq_data *f = d->host_data;
+
+ /* Skip invalid IRQs, only register handlers for the real ones */
+ if (!(f->valid & BIT(hwirq)))
+ return -EPERM;
+ irq_set_chip_data(irq, f);
+ irq_set_chip_and_handler(irq, &fpga_chip, handle_level_irq);
+ irq_set_probe(irq);
+ return 0;
+}
+
+static const struct irq_domain_ops fpga_irqdomain_ops = {
+ .map = fpga_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static void __init fpga_irq_init(void __iomem *base, int parent_irq,
+ u32 valid, struct device_node *node)
+{
+ struct fpga_irq_data *f;
+ int i;
+
+ if (fpga_irq_id >= ARRAY_SIZE(fpga_irq_devices)) {
+		pr_err("%s: too many FPGA IRQ controllers for CONFIG_VERSATILE_FPGA_IRQ_NR, please increase it\n", __func__);
+ return;
+ }
+ f = &fpga_irq_devices[fpga_irq_id];
+ f->base = base;
+ f->valid = valid;
+
+ if (parent_irq != -1) {
+ irq_set_chained_handler_and_data(parent_irq, fpga_irq_handle,
+ f);
+ }
+
+ f->domain = irq_domain_add_linear(node, fls(valid),
+ &fpga_irqdomain_ops, f);
+
+ /* This will allocate all valid descriptors in the linear case */
+ for (i = 0; i < fls(valid); i++)
+ if (valid & BIT(i)) {
+ /* Is this still required? */
+ irq_create_mapping(f->domain, i);
+ f->used_irqs++;
+ }
+
+ pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs",
+ fpga_irq_id, node->name, base, f->used_irqs);
+ if (parent_irq != -1)
+ pr_cont(", parent IRQ: %d\n", parent_irq);
+ else
+ pr_cont("\n");
+
+ fpga_irq_id++;
+}
+
+#ifdef CONFIG_OF
+static int __init fpga_irq_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ void __iomem *base;
+ u32 clear_mask;
+ u32 valid_mask;
+ int parent_irq;
+
+ if (WARN_ON(!node))
+ return -ENODEV;
+
+ base = of_iomap(node, 0);
+ WARN(!base, "unable to map fpga irq registers\n");
+
+ if (of_property_read_u32(node, "clear-mask", &clear_mask))
+ clear_mask = 0;
+
+ if (of_property_read_u32(node, "valid-mask", &valid_mask))
+ valid_mask = 0;
+
+ writel(clear_mask, base + IRQ_ENABLE_CLEAR);
+ writel(clear_mask, base + FIQ_ENABLE_CLEAR);
+
+ /* Some chips are cascaded from a parent IRQ */
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ set_handle_irq(fpga_handle_irq);
+ parent_irq = -1;
+ }
+
+ fpga_irq_init(base, parent_irq, valid_mask, node);
+
+ /*
+ * On Versatile AB/PB, some secondary interrupts have a direct
+ * pass-thru to the primary controller for IRQs 20 and 22-31 which need
+ * to be enabled. See section 3.10 of the Versatile AB user guide.
+ */
+ if (of_device_is_compatible(node, "arm,versatile-sic"))
+ writel(0xffd00000, base + PIC_ENABLES);
+
+ return 0;
+}
+IRQCHIP_DECLARE(arm_fpga, "arm,versatile-fpga-irq", fpga_irq_of_init);
+IRQCHIP_DECLARE(arm_fpga_sic, "arm,versatile-sic", fpga_irq_of_init);
+IRQCHIP_DECLARE(ox810se_rps, "oxsemi,ox810se-rps-irq", fpga_irq_of_init);
+#endif
diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c
new file mode 100644
index 000000000..2b9a8ba58
--- /dev/null
+++ b/drivers/irqchip/irq-vf610-mscm-ir.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2015 Toradex AG
+ * Author: Stefan Agner <stefan@agner.ch>
+ *
+ * IRQ chip driver for the MSCM interrupt router available on Vybrid SoCs.
+ * The interrupt router sits between the CPU's interrupt controller and the
+ * peripherals. It allows routing the peripheral interrupts to either of
+ * the two CPUs available on Vybrid VF6xx SoCs (Cortex-A5 or Cortex-M4),
+ * and is configured transparently on an IRQ request.
+ *
+ * o All peripheral interrupts of the Vybrid SoC can be routed to
+ *   CPU 0, CPU 1 or both. The routing is useful for dual-core
+ *   variants of Vybrid SoC such as VF6xx. This driver routes the
+ *   requested interrupt to the CPU it is currently running on.
+ *
+ * o The interrupt router must be set up even on single-core
+ *   variants of Vybrid.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+
+#define MSCM_CPxNUM 0x4
+
+#define MSCM_IRSPRC(n) (0x80 + 2 * (n))
+#define MSCM_IRSPRC_CPEN_MASK 0x3
+
+#define MSCM_IRSPRC_NUM 112
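+/*
+ * Each of the 112 routable peripheral interrupts has a 16-bit IRSPRC
+ * register at offset 0x80 + 2 * n; its low two CPEN bits select which
+ * CPU(s) receive the interrupt.
+ */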
+
+struct vf610_mscm_ir_chip_data {
+ void __iomem *mscm_ir_base;
+ u16 cpu_mask;
+ u16 saved_irsprc[MSCM_IRSPRC_NUM];
+ bool is_nvic;
+};
+
+static struct vf610_mscm_ir_chip_data *mscm_ir_data;
+
+static inline void vf610_mscm_ir_save(struct vf610_mscm_ir_chip_data *data)
+{
+ int i;
+
+ for (i = 0; i < MSCM_IRSPRC_NUM; i++)
+ data->saved_irsprc[i] = readw_relaxed(data->mscm_ir_base + MSCM_IRSPRC(i));
+}
+
+static inline void vf610_mscm_ir_restore(struct vf610_mscm_ir_chip_data *data)
+{
+ int i;
+
+ for (i = 0; i < MSCM_IRSPRC_NUM; i++)
+ writew_relaxed(data->saved_irsprc[i], data->mscm_ir_base + MSCM_IRSPRC(i));
+}
+
+static int vf610_mscm_ir_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ vf610_mscm_ir_save(mscm_ir_data);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ vf610_mscm_ir_restore(mscm_ir_data);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mscm_ir_notifier_block = {
+ .notifier_call = vf610_mscm_ir_notifier,
+};
+
+static void vf610_mscm_ir_enable(struct irq_data *data)
+{
+ irq_hw_number_t hwirq = data->hwirq;
+ struct vf610_mscm_ir_chip_data *chip_data = data->chip_data;
+ u16 irsprc;
+
+ irsprc = readw_relaxed(chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));
+ irsprc &= MSCM_IRSPRC_CPEN_MASK;
+
+ WARN_ON(irsprc & ~chip_data->cpu_mask);
+
+ writew_relaxed(chip_data->cpu_mask,
+ chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));
+
+ irq_chip_enable_parent(data);
+}
+
+static void vf610_mscm_ir_disable(struct irq_data *data)
+{
+ irq_hw_number_t hwirq = data->hwirq;
+ struct vf610_mscm_ir_chip_data *chip_data = data->chip_data;
+
+ writew_relaxed(0x0, chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));
+
+ irq_chip_disable_parent(data);
+}
+
+static struct irq_chip vf610_mscm_ir_irq_chip = {
+ .name = "mscm-ir",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_enable = vf610_mscm_ir_enable,
+ .irq_disable = vf610_mscm_ir_disable,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i;
+ irq_hw_number_t hwirq;
+ struct irq_fwspec *fwspec = arg;
+ struct irq_fwspec parent_fwspec;
+
+ if (!irq_domain_get_of_node(domain->parent))
+ return -EINVAL;
+
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ hwirq = fwspec->param[0];
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &vf610_mscm_ir_irq_chip,
+ domain->host_data);
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+
+ if (mscm_ir_data->is_nvic) {
+ parent_fwspec.param_count = 1;
+ parent_fwspec.param[0] = fwspec->param[0];
+ } else {
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = GIC_SPI;
+ parent_fwspec.param[1] = fwspec->param[0];
+ parent_fwspec.param[2] = fwspec->param[1];
+ }
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static int vf610_mscm_ir_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+
+static const struct irq_domain_ops mscm_irq_domain_ops = {
+ .translate = vf610_mscm_ir_domain_translate,
+ .alloc = vf610_mscm_ir_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init vf610_mscm_ir_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *domain_parent;
+ struct regmap *mscm_cp_regmap;
+ int ret, cpuid;
+
+ domain_parent = irq_find_host(parent);
+ if (!domain_parent) {
+ pr_err("vf610_mscm_ir: interrupt-parent not found\n");
+ return -EINVAL;
+ }
+
+ mscm_ir_data = kzalloc(sizeof(*mscm_ir_data), GFP_KERNEL);
+ if (!mscm_ir_data)
+ return -ENOMEM;
+
+ mscm_ir_data->mscm_ir_base = of_io_request_and_map(node, 0, "mscm-ir");
+ if (IS_ERR(mscm_ir_data->mscm_ir_base)) {
+ pr_err("vf610_mscm_ir: unable to map mscm register\n");
+ ret = PTR_ERR(mscm_ir_data->mscm_ir_base);
+ goto out_free;
+ }
+
+ mscm_cp_regmap = syscon_regmap_lookup_by_phandle(node, "fsl,cpucfg");
+ if (IS_ERR(mscm_cp_regmap)) {
+ ret = PTR_ERR(mscm_cp_regmap);
+ pr_err("vf610_mscm_ir: regmap lookup for cpucfg failed\n");
+ goto out_unmap;
+ }
+
+ regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid);
+ mscm_ir_data->cpu_mask = 0x1 << cpuid;
+
+ domain = irq_domain_add_hierarchy(domain_parent, 0,
+ MSCM_IRSPRC_NUM, node,
+ &mscm_irq_domain_ops, mscm_ir_data);
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ if (of_device_is_compatible(irq_domain_get_of_node(domain->parent),
+ "arm,armv7m-nvic"))
+ mscm_ir_data->is_nvic = true;
+
+ cpu_pm_register_notifier(&mscm_ir_notifier_block);
+
+ return 0;
+
+out_unmap:
+ iounmap(mscm_ir_data->mscm_ir_base);
+out_free:
+ kfree(mscm_ir_data);
+ return ret;
+}
+IRQCHIP_DECLARE(vf610_mscm_ir, "fsl,vf610-mscm-ir", vf610_mscm_ir_of_init);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
new file mode 100644
index 000000000..9e3d5561e
--- /dev/null
+++ b/drivers/irqchip/irq-vic.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/arch/arm/common/vic.c
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/irqchip/arm-vic.h>
+
+#include <asm/exception.h>
+#include <asm/irq.h>
+
+#define VIC_IRQ_STATUS 0x00
+#define VIC_FIQ_STATUS 0x04
+#define VIC_RAW_STATUS 0x08
+#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */
+#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */
+#define VIC_INT_ENABLE_CLEAR 0x14
+#define VIC_INT_SOFT 0x18
+#define VIC_INT_SOFT_CLEAR 0x1c
+#define VIC_PROTECT 0x20
+#define VIC_PL190_VECT_ADDR 0x30 /* PL190 only */
+#define VIC_PL190_DEF_VECT_ADDR 0x34 /* PL190 only */
+
+#define VIC_VECT_ADDR0 0x100 /* 0 to 15 (0..31 PL192) */
+#define VIC_VECT_CNTL0 0x200 /* 0 to 15 (0..31 PL192) */
+#define VIC_ITCR 0x300 /* VIC test control register */
+
+#define VIC_VECT_CNTL_ENABLE (1 << 5)
+
+#define VIC_PL192_VECT_ADDR 0xF00
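+/*
+ * Only the first 16 vectored-interrupt slots are programmed by this driver
+ * (see vic_init2()); interrupt handling itself polls VIC_IRQ_STATUS rather
+ * than reading back the vectored addresses.
+ */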
+
+/**
+ * struct vic_device - VIC PM device
+ * @irq: The IRQ number for the base of the VIC.
+ * @base: The register base for the VIC.
+ * @valid_sources: A bitmask of valid interrupts
+ * @resume_sources: A bitmask of interrupts for resume.
+ * @resume_irqs: The IRQs enabled for resume.
+ * @int_select: Save for VIC_INT_SELECT.
+ * @int_enable: Save for VIC_INT_ENABLE.
+ * @soft_int: Save for VIC_INT_SOFT.
+ * @protect: Save for VIC_PROTECT.
+ * @domain: The IRQ domain for the VIC.
+ */
+struct vic_device {
+ void __iomem *base;
+ int irq;
+ u32 valid_sources;
+ u32 resume_sources;
+ u32 resume_irqs;
+ u32 int_select;
+ u32 int_enable;
+ u32 soft_int;
+ u32 protect;
+ struct irq_domain *domain;
+};
+
+/* we cannot allocate memory when VICs are initially registered */
+static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
+
+static int vic_id;
+
+static void vic_handle_irq(struct pt_regs *regs);
+
+/**
+ * vic_init2 - common initialisation code
+ * @base: Base of the VIC.
+ *
+ * Common initialisation code for registration
+ * and resume.
+*/
+static void vic_init2(void __iomem *base)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+ writel(VIC_VECT_CNTL_ENABLE | i, reg);
+ }
+
+ writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+}
+
+#ifdef CONFIG_PM
+static void resume_one_vic(struct vic_device *vic)
+{
+ void __iomem *base = vic->base;
+
+ printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
+
+ /* re-initialise static settings */
+ vic_init2(base);
+
+ writel(vic->int_select, base + VIC_INT_SELECT);
+ writel(vic->protect, base + VIC_PROTECT);
+
+ /* set the enabled ints and then clear the non-enabled */
+ writel(vic->int_enable, base + VIC_INT_ENABLE);
+ writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR);
+
+ /* and the same for the soft-int register */
+
+ writel(vic->soft_int, base + VIC_INT_SOFT);
+ writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_resume(void)
+{
+ int id;
+
+ for (id = vic_id - 1; id >= 0; id--)
+ resume_one_vic(vic_devices + id);
+}
+
+static void suspend_one_vic(struct vic_device *vic)
+{
+ void __iomem *base = vic->base;
+
+ printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
+
+ vic->int_select = readl(base + VIC_INT_SELECT);
+ vic->int_enable = readl(base + VIC_INT_ENABLE);
+ vic->soft_int = readl(base + VIC_INT_SOFT);
+ vic->protect = readl(base + VIC_PROTECT);
+
+ /* set the interrupts (if any) that are used for
+ * resuming the system */
+
+ writel(vic->resume_irqs, base + VIC_INT_ENABLE);
+ writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static int vic_suspend(void)
+{
+ int id;
+
+ for (id = 0; id < vic_id; id++)
+ suspend_one_vic(vic_devices + id);
+
+ return 0;
+}
+
+static struct syscore_ops vic_syscore_ops = {
+ .suspend = vic_suspend,
+ .resume = vic_resume,
+};
+
+/**
+ * vic_pm_init - initcall to register VIC pm
+ *
+ * This is called via late_initcall() to register
+ * the resources for the VICs due to the early
+ * nature of the VIC's registration.
+*/
+static int __init vic_pm_init(void)
+{
+ if (vic_id > 0)
+ register_syscore_ops(&vic_syscore_ops);
+
+ return 0;
+}
+late_initcall(vic_pm_init);
+#endif /* CONFIG_PM */
+
+static struct irq_chip vic_chip;
+
+static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct vic_device *v = d->host_data;
+
+ /* Skip invalid IRQs, only register handlers for the real ones */
+ if (!(v->valid_sources & (1 << hwirq)))
+ return -EPERM;
+ irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
+ irq_set_chip_data(irq, v->base);
+ irq_set_probe(irq);
+ return 0;
+}
+
+/*
+ * Handle each interrupt in a single VIC. Returns non-zero if we've
+ * handled at least one interrupt. This reads the status register
+ * before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
+ */
+static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
+{
+ u32 stat, irq;
+ int handled = 0;
+
+ while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
+ irq = ffs(stat) - 1;
+ generic_handle_domain_irq(vic->domain, irq);
+ handled = 1;
+ }
+
+ return handled;
+}
+
+static void vic_handle_irq_cascaded(struct irq_desc *desc)
+{
+ u32 stat, hwirq;
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ struct vic_device *vic = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(host_chip, desc);
+
+ while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
+ hwirq = ffs(stat) - 1;
+ generic_handle_domain_irq(vic->domain, hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+/*
+ * Keep iterating over all registered VIC's until there are no pending
+ * interrupts.
+ */
+static void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+{
+ int i, handled;
+
+ do {
+ for (i = 0, handled = 0; i < vic_id; ++i)
+ handled |= handle_one_vic(&vic_devices[i], regs);
+ } while (handled);
+}
+
+static const struct irq_domain_ops vic_irqdomain_ops = {
+ .map = vic_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * vic_register() - Register a VIC.
+ * @base: The base address of the VIC.
+ * @parent_irq: The parent IRQ if cascaded, else 0.
+ * @irq: The base IRQ for the VIC.
+ * @valid_sources: bitmask of valid interrupts
+ * @resume_sources: bitmask of interrupts allowed for resume sources.
+ * @node: The device tree node associated with the VIC.
+ *
+ * Register the VIC with the system device tree so that it can be notified
+ * of suspend and resume requests and ensure that the correct actions are
+ * taken to re-instate the settings on resume.
+ *
+ * This also configures the IRQ domain for the VIC.
+ */
+static void __init vic_register(void __iomem *base, unsigned int parent_irq,
+ unsigned int irq,
+ u32 valid_sources, u32 resume_sources,
+ struct device_node *node)
+{
+ struct vic_device *v;
+ int i;
+
+ if (vic_id >= ARRAY_SIZE(vic_devices)) {
+ printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
+ return;
+ }
+
+ v = &vic_devices[vic_id];
+ v->base = base;
+ v->valid_sources = valid_sources;
+ v->resume_sources = resume_sources;
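+	/* route the CPU's IRQ exception through the common VIC handler */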
+ set_handle_irq(vic_handle_irq);
+ vic_id++;
+
+ if (parent_irq) {
+ irq_set_chained_handler_and_data(parent_irq,
+ vic_handle_irq_cascaded, v);
+ }
+
+ v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
+ &vic_irqdomain_ops, v);
+ /* create an IRQ mapping for each valid IRQ */
+ for (i = 0; i < fls(valid_sources); i++)
+ if (valid_sources & (1 << i))
+ irq_create_mapping(v->domain, i);
+ /* If no base IRQ was passed, figure out our allocated base */
+ if (irq)
+ v->irq = irq;
+ else
+ v->irq = irq_find_mapping(v->domain, 0);
+}
+
+static void vic_ack_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+	/* also clear any soft-triggered interrupt, in case that was the cause */
+ writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_mask_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static void vic_unmask_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE);
+}
+
+#if defined(CONFIG_PM)
+static struct vic_device *vic_from_irq(unsigned int irq)
+{
+ struct vic_device *v = vic_devices;
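+	/* each VIC covers an aligned block of 32 Linux IRQs; mask off the low bits to find its base */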
+ unsigned int base_irq = irq & ~31;
+ int id;
+
+ for (id = 0; id < vic_id; id++, v++) {
+ if (v->irq == base_irq)
+ return v;
+ }
+
+ return NULL;
+}
+
+static int vic_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct vic_device *v = vic_from_irq(d->irq);
+ unsigned int off = d->hwirq;
+ u32 bit = 1 << off;
+
+ if (!v)
+ return -EINVAL;
+
+ if (!(bit & v->resume_sources))
+ return -EINVAL;
+
+ if (on)
+ v->resume_irqs |= bit;
+ else
+ v->resume_irqs &= ~bit;
+
+ return 0;
+}
+#else
+#define vic_set_wake NULL
+#endif /* CONFIG_PM */
+
+static struct irq_chip vic_chip = {
+ .name = "VIC",
+ .irq_ack = vic_ack_irq,
+ .irq_mask = vic_mask_irq,
+ .irq_unmask = vic_unmask_irq,
+ .irq_set_wake = vic_set_wake,
+};
+
+static void __init vic_disable(void __iomem *base)
+{
+ writel(0, base + VIC_INT_SELECT);
+ writel(0, base + VIC_INT_ENABLE);
+ writel(~0, base + VIC_INT_ENABLE_CLEAR);
+ writel(0, base + VIC_ITCR);
+ writel(~0, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void __init vic_clear_interrupts(void __iomem *base)
+{
+ unsigned int i;
+
+ writel(0, base + VIC_PL190_VECT_ADDR);
+ for (i = 0; i < 19; i++) {
+ unsigned int value;
+
+ value = readl(base + VIC_PL190_VECT_ADDR);
+ writel(value, base + VIC_PL190_VECT_ADDR);
+ }
+}
+
+/*
+ * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
+ * The original cell has 32 interrupts, while the modified one has 64,
+ * duplicating the 0x00..0x1f register block at 0x20..0x3f. In that case
+ * the probe function is called twice, with base set to offsets 0x000
+ * and 0x020 within the page; we call the latter the "second block".
+ */
+static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources, struct device_node *node)
+{
+ unsigned int i;
+ int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
+
+ /* Disable all interrupts initially. */
+ vic_disable(base);
+
+ /*
+ * Make sure we clear all existing interrupts. The vector registers
+ * in this cell are after the second block of general registers,
+ * so we can address them using standard offsets, but only from
+ * the second base address, which is 0x20 in the page
+ */
+ if (vic_2nd_block) {
+ vic_clear_interrupts(base);
+
+		/* ST has 16 vectors as well, but we don't enable them for now */
+ for (i = 0; i < 16; i++) {
+ void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+ writel(0, reg);
+ }
+
+ writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+ }
+
+ vic_register(base, 0, irq_start, vic_sources, 0, node);
+}
+
+static void __init __vic_init(void __iomem *base, int parent_irq, int irq_start,
+ u32 vic_sources, u32 resume_sources,
+ struct device_node *node)
+{
+ unsigned int i;
+ u32 cellid = 0;
+ enum amba_vendor vendor;
+
+ /* Identify which VIC cell this one is, by reading the ID */
+ for (i = 0; i < 4; i++) {
+ void __iomem *addr;
+ addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
+ cellid |= (readl(addr) & 0xff) << (8 * i);
+ }
+ vendor = (cellid >> 12) & 0xff;
+ printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
+ base, cellid, vendor);
+
+ switch(vendor) {
+ case AMBA_VENDOR_ST:
+ vic_init_st(base, irq_start, vic_sources, node);
+ return;
+ default:
+		printk(KERN_WARNING "VIC: unknown vendor, continuing anyway\n");
+ fallthrough;
+ case AMBA_VENDOR_ARM:
+ break;
+ }
+
+ /* Disable all interrupts initially. */
+ vic_disable(base);
+
+ /* Make sure we clear all existing interrupts */
+ vic_clear_interrupts(base);
+
+ vic_init2(base);
+
+ vic_register(base, parent_irq, irq_start, vic_sources, resume_sources, node);
+}
+
+/**
+ * vic_init() - initialise a vectored interrupt controller
+ * @base: iomem base address
+ * @irq_start: starting interrupt number, must be a multiple of 32
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ */
+void __init vic_init(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources, u32 resume_sources)
+{
+ __vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL);
+}
+
+#ifdef CONFIG_OF
+static int __init vic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ void __iomem *regs;
+ u32 interrupt_mask = ~0;
+ u32 wakeup_mask = ~0;
+ int parent_irq;
+
+ regs = of_iomap(node, 0);
+ if (WARN_ON(!regs))
+ return -EIO;
+
+ of_property_read_u32(node, "valid-mask", &interrupt_mask);
+ of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
+ parent_irq = of_irq_get(node, 0);
+ if (parent_irq < 0)
+ parent_irq = 0;
+
+ /*
+ * Passing 0 as first IRQ makes the simple domain allocate descriptors
+ */
+ __vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node);
+
+ return 0;
+}
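+
+/*
+ * Example (hypothetical) device tree node accepted by this driver; the
+ * optional "valid-mask" and "valid-wakeup-mask" properties are the ones
+ * parsed above, the address and mask values are purely illustrative:
+ *
+ *	vic0: interrupt-controller@10140000 {
+ *		compatible = "arm,pl192-vic";
+ *		interrupt-controller;
+ *		#interrupt-cells = <1>;
+ *		reg = <0x10140000 0x1000>;
+ *		valid-mask = <0xffffffff>;
+ *		valid-wakeup-mask = <0x0000ff7f>;
+ *	};
+ */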
+IRQCHIP_DECLARE(arm_pl190_vic, "arm,pl190-vic", vic_of_init);
+IRQCHIP_DECLARE(arm_pl192_vic, "arm,pl192-vic", vic_of_init);
+IRQCHIP_DECLARE(arm_versatile_vic, "arm,versatile-vic", vic_of_init);
+#endif /* CONFIG_OF */
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
new file mode 100644
index 000000000..e17dd3a8c
--- /dev/null
+++ b/drivers/irqchip/irq-vt8500.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/arm/mach-vt8500/irq.c
+ *
+ * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ */
+
+/*
+ * This file is copied and modified from the original irq.c provided by
+ * Alexey Charkov. Minor changes have been made for Device Tree Support.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define VT8500_ICPC_IRQ 0x20
+#define VT8500_ICPC_FIQ 0x24
+#define VT8500_ICDC 0x40 /* Destination Control 64*u32 */
+#define VT8500_ICIS 0x80 /* Interrupt status, 16*u32 */
+
+/* ICPC */
+#define ICPC_MASK 0x3F
+#define ICPC_ROTATE BIT(6)
+
+/* IC_DCTR */
+#define ICDC_IRQ 0x00
+#define ICDC_FIQ 0x01
+#define ICDC_DSS0 0x02
+#define ICDC_DSS1 0x03
+#define ICDC_DSS2 0x04
+#define ICDC_DSS3 0x05
+#define ICDC_DSS4 0x06
+#define ICDC_DSS5 0x07
+
+#define VT8500_INT_DISABLE 0
+#define VT8500_INT_ENABLE BIT(3)
+
+#define VT8500_TRIGGER_HIGH 0
+#define VT8500_TRIGGER_RISING BIT(5)
+#define VT8500_TRIGGER_FALLING BIT(6)
+#define VT8500_EDGE ( VT8500_TRIGGER_RISING \
+ | VT8500_TRIGGER_FALLING)
+
+/* vt8500 has 1 intc, wm8505 and wm8650 have 2 */
+#define VT8500_INTC_MAX 2
+
+struct vt8500_irq_data {
+ void __iomem *base; /* IO Memory base address */
+ struct irq_domain *domain; /* Domain for this controller */
+};
+
+/* Global variable for accessing io-mem addresses */
+static struct vt8500_irq_data intc[VT8500_INTC_MAX];
+static u32 active_cnt = 0;
+
+static void vt8500_irq_mask(struct irq_data *d)
+{
+ struct vt8500_irq_data *priv = d->domain->host_data;
+ void __iomem *base = priv->base;
+ void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4);
+ u8 edge, dctr;
+ u32 status;
+
+ edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE;
+ if (edge) {
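+		/* for edge sources, writing the status bit back clears (acks) the pending interrupt */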
+ status = readl(stat_reg);
+
+ status |= (1 << (d->hwirq & 0x1f));
+ writel(status, stat_reg);
+ } else {
+ dctr = readb(base + VT8500_ICDC + d->hwirq);
+ dctr &= ~VT8500_INT_ENABLE;
+ writeb(dctr, base + VT8500_ICDC + d->hwirq);
+ }
+}
+
+static void vt8500_irq_unmask(struct irq_data *d)
+{
+ struct vt8500_irq_data *priv = d->domain->host_data;
+ void __iomem *base = priv->base;
+ u8 dctr;
+
+ dctr = readb(base + VT8500_ICDC + d->hwirq);
+ dctr |= VT8500_INT_ENABLE;
+ writeb(dctr, base + VT8500_ICDC + d->hwirq);
+}
+
+static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct vt8500_irq_data *priv = d->domain->host_data;
+ void __iomem *base = priv->base;
+ u8 dctr;
+
+ dctr = readb(base + VT8500_ICDC + d->hwirq);
+ dctr &= ~VT8500_EDGE;
+
+ switch (flow_type) {
+ case IRQF_TRIGGER_LOW:
+ return -EINVAL;
+ case IRQF_TRIGGER_HIGH:
+ dctr |= VT8500_TRIGGER_HIGH;
+ irq_set_handler_locked(d, handle_level_irq);
+ break;
+ case IRQF_TRIGGER_FALLING:
+ dctr |= VT8500_TRIGGER_FALLING;
+ irq_set_handler_locked(d, handle_edge_irq);
+ break;
+ case IRQF_TRIGGER_RISING:
+ dctr |= VT8500_TRIGGER_RISING;
+ irq_set_handler_locked(d, handle_edge_irq);
+ break;
+ }
+ writeb(dctr, base + VT8500_ICDC + d->hwirq);
+
+ return 0;
+}
+
+static struct irq_chip vt8500_irq_chip = {
+ .name = "vt8500",
+ .irq_ack = vt8500_irq_mask,
+ .irq_mask = vt8500_irq_mask,
+ .irq_unmask = vt8500_irq_unmask,
+ .irq_set_type = vt8500_irq_set_type,
+};
+
+static void __init vt8500_init_irq_hw(void __iomem *base)
+{
+ u32 i;
+
+ /* Enable rotating priority for IRQ */
+ writel(ICPC_ROTATE, base + VT8500_ICPC_IRQ);
+ writel(0x00, base + VT8500_ICPC_FIQ);
+
+ /* Disable all interrupts and route them to IRQ */
+ for (i = 0; i < 64; i++)
+ writeb(VT8500_INT_DISABLE | ICDC_IRQ, base + VT8500_ICDC + i);
+}
+
+static int vt8500_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops vt8500_irq_domain_ops = {
+ .map = vt8500_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
+{
+ u32 stat, i;
+ int irqnr;
+ void __iomem *base;
+
+ /* Loop through each active controller */
+	for (i = 0; i < active_cnt; i++) {
+ base = intc[i].base;
+ irqnr = readl_relaxed(base) & 0x3F;
+		/*
+		 * Highest Priority register default = 63, so check that this
+		 * is a real interrupt by checking the status register
+		 */
+ if (irqnr == 63) {
+ stat = readl_relaxed(base + VT8500_ICIS + 4);
+ if (!(stat & BIT(31)))
+ continue;
+ }
+
+ generic_handle_domain_irq(intc[i].domain, irqnr);
+ }
+}
+
+static int __init vt8500_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int irq, i;
+ struct device_node *np = node;
+
+ if (active_cnt == VT8500_INTC_MAX) {
+ pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n",
+ __func__);
+ goto out;
+ }
+
+ intc[active_cnt].base = of_iomap(np, 0);
+ intc[active_cnt].domain = irq_domain_add_linear(node, 64,
+ &vt8500_irq_domain_ops, &intc[active_cnt]);
+
+ if (!intc[active_cnt].base) {
+ pr_err("%s: Unable to map IO memory\n", __func__);
+ goto out;
+ }
+
+ if (!intc[active_cnt].domain) {
+ pr_err("%s: Unable to add irq domain!\n", __func__);
+ goto out;
+ }
+
+ set_handle_irq(vt8500_handle_irq);
+
+ vt8500_init_irq_hw(intc[active_cnt].base);
+
+ pr_info("vt8500-irq: Added interrupt controller\n");
+
+ active_cnt++;
+
+ /* check if this is a slaved controller */
+ if (of_irq_count(np) != 0) {
+ /* check that we have the correct number of interrupts */
+ if (of_irq_count(np) != 8) {
+ pr_err("%s: Incorrect IRQ map for slaved controller\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 8; i++) {
+ irq = irq_of_parse_and_map(np, i);
+ enable_irq(irq);
+ }
+
+ pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
+ }
+out:
+ return 0;
+}
+
+IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init);
diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
new file mode 100644
index 000000000..91df62a64
--- /dev/null
+++ b/drivers/irqchip/irq-wpcm450-aic.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2021 Jonathan Neuschäfer
+
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/printk.h>
+
+#include <asm/exception.h>
+
+#define AIC_SCR(x) ((x)*4) /* Source control registers */
+#define AIC_GEN 0x84 /* Interrupt group enable control register */
+#define AIC_GRSR 0x88 /* Interrupt group raw status register */
+#define AIC_IRSR 0x100 /* Interrupt raw status register */
+#define AIC_IASR 0x104 /* Interrupt active status register */
+#define AIC_ISR 0x108 /* Interrupt status register */
+#define AIC_IPER 0x10c /* Interrupt priority encoding register */
+#define AIC_ISNR 0x110 /* Interrupt source number register */
+#define AIC_IMR 0x114 /* Interrupt mask register */
+#define AIC_OISR 0x118 /* Output interrupt status register */
+#define AIC_MECR 0x120 /* Mask enable command register */
+#define AIC_MDCR 0x124 /* Mask disable command register */
+#define AIC_SSCR 0x128 /* Source set command register */
+#define AIC_SCCR 0x12c /* Source clear command register */
+#define AIC_EOSCR 0x130 /* End of service command register */
+
+#define AIC_SCR_SRCTYPE_LOW_LEVEL (0 << 6)
+#define AIC_SCR_SRCTYPE_HIGH_LEVEL (1 << 6)
+#define AIC_SCR_SRCTYPE_NEG_EDGE (2 << 6)
+#define AIC_SCR_SRCTYPE_POS_EDGE (3 << 6)
+#define AIC_SCR_PRIORITY(x) (x)
+#define AIC_SCR_PRIORITY_MASK 0x7
+
+#define AIC_NUM_IRQS 32
+
+struct wpcm450_aic {
+ void __iomem *regs;
+ struct irq_domain *domain;
+};
+
+static struct wpcm450_aic *aic;
+
+static void wpcm450_aic_init_hw(void)
+{
+ int i;
+
+ /* Disable (mask) all interrupts */
+ writel(0xffffffff, aic->regs + AIC_MDCR);
+
+ /*
+ * Make sure the interrupt controller is ready to serve new interrupts.
+ * Reading from IPER indicates that the nIRQ signal may be deasserted,
+ * and writing to EOSCR indicates that interrupt handling has finished.
+ */
+ readl(aic->regs + AIC_IPER);
+ writel(0, aic->regs + AIC_EOSCR);
+
+ /* Initialize trigger mode and priority of each interrupt source */
+ for (i = 0; i < AIC_NUM_IRQS; i++)
+ writel(AIC_SCR_SRCTYPE_HIGH_LEVEL | AIC_SCR_PRIORITY(7),
+ aic->regs + AIC_SCR(i));
+}
+
+static void __exception_irq_entry wpcm450_aic_handle_irq(struct pt_regs *regs)
+{
+ int hwirq;
+
+ /* Determine the interrupt source */
+ /* Read IPER to signal that nIRQ can be de-asserted */
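+	/* IPER encodes the source number multiplied by four, hence the division */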
+ hwirq = readl(aic->regs + AIC_IPER) / 4;
+
+ generic_handle_domain_irq(aic->domain, hwirq);
+}
+
+static void wpcm450_aic_eoi(struct irq_data *d)
+{
+ /* Signal end-of-service */
+ writel(0, aic->regs + AIC_EOSCR);
+}
+
+static void wpcm450_aic_mask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq);
+
+ /* Disable (mask) the interrupt */
+ writel(mask, aic->regs + AIC_MDCR);
+}
+
+static void wpcm450_aic_unmask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq);
+
+ /* Enable (unmask) the interrupt */
+ writel(mask, aic->regs + AIC_MECR);
+}
+
+static int wpcm450_aic_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ /*
+ * The hardware supports high/low level, as well as rising/falling edge
+ * modes, and the DT binding accommodates for that, but as long as
+ * other modes than high level mode are not used and can't be tested,
+ * they are rejected in this driver.
+ */
+ if ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip wpcm450_aic_chip = {
+ .name = "wpcm450-aic",
+ .irq_eoi = wpcm450_aic_eoi,
+ .irq_mask = wpcm450_aic_mask,
+ .irq_unmask = wpcm450_aic_unmask,
+ .irq_set_type = wpcm450_aic_set_type,
+};
+
+static int wpcm450_aic_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq)
+{
+ if (hwirq >= AIC_NUM_IRQS)
+ return -EPERM;
+
+ irq_set_chip_and_handler(irq, &wpcm450_aic_chip, handle_fasteoi_irq);
+ irq_set_chip_data(irq, aic);
+ irq_set_probe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops wpcm450_aic_ops = {
+ .map = wpcm450_aic_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int __init wpcm450_aic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ if (parent)
+ return -EINVAL;
+
+ aic = kzalloc(sizeof(*aic), GFP_KERNEL);
+ if (!aic)
+ return -ENOMEM;
+
+ aic->regs = of_iomap(node, 0);
+ if (!aic->regs) {
+ pr_err("Failed to map WPCM450 AIC registers\n");
+ kfree(aic);
+ return -ENOMEM;
+ }
+
+ wpcm450_aic_init_hw();
+
+ set_handle_irq(wpcm450_aic_handle_irq);
+
+ aic->domain = irq_domain_add_linear(node, AIC_NUM_IRQS, &wpcm450_aic_ops, aic);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(wpcm450_aic, "nuvoton,wpcm450-aic", wpcm450_aic_of_init);
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
new file mode 100644
index 000000000..238d3d344
--- /dev/null
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012-2013 Xilinx, Inc.
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/jump_label.h>
+#include <linux/bug.h>
+#include <linux/of_irq.h>
+
+/* No one else should require these constants, so define them locally here. */
+#define ISR 0x00 /* Interrupt Status Register */
+#define IPR 0x04 /* Interrupt Pending Register */
+#define IER 0x08 /* Interrupt Enable Register */
+#define IAR 0x0c /* Interrupt Acknowledge Register */
+#define SIE 0x10 /* Set Interrupt Enable bits */
+#define CIE 0x14 /* Clear Interrupt Enable bits */
+#define IVR 0x18 /* Interrupt Vector Register */
+#define MER 0x1c /* Master Enable Register */
+
+#define MER_ME (1<<0)
+#define MER_HIE (1<<1)
+
+#define SPURIOUS_IRQ (-1U)
+
+static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
+
+struct xintc_irq_chip {
+ void __iomem *base;
+ struct irq_domain *root_domain;
+ u32 intr_mask;
+ u32 nr_irq;
+};
+
+static struct xintc_irq_chip *primary_intc;
+
+static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data)
+{
+ if (static_branch_unlikely(&xintc_is_be))
+ iowrite32be(data, irqc->base + reg);
+ else
+ iowrite32(data, irqc->base + reg);
+}
+
+static u32 xintc_read(struct xintc_irq_chip *irqc, int reg)
+{
+ if (static_branch_unlikely(&xintc_is_be))
+ return ioread32be(irqc->base + reg);
+ else
+ return ioread32(irqc->base + reg);
+}
+
+static void intc_enable_or_unmask(struct irq_data *d)
+{
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+ unsigned long mask = BIT(d->hwirq);
+
+ pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
+
+	/*
+	 * Ack level irqs here rather than in the ack callback:
+	 * handle_level_irq() acks before calling the interrupt handler,
+	 * which is too early for a level source that is still asserted.
+	 */
+ if (irqd_is_level_type(d))
+ xintc_write(irqc, IAR, mask);
+
+ xintc_write(irqc, SIE, mask);
+}
+
+static void intc_disable_or_mask(struct irq_data *d)
+{
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
+ pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
+ xintc_write(irqc, CIE, BIT(d->hwirq));
+}
+
+static void intc_ack(struct irq_data *d)
+{
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
+ pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
+ xintc_write(irqc, IAR, BIT(d->hwirq));
+}
+
+static void intc_mask_ack(struct irq_data *d)
+{
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+ unsigned long mask = BIT(d->hwirq);
+
+ pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
+ xintc_write(irqc, CIE, mask);
+ xintc_write(irqc, IAR, mask);
+}
+
+static struct irq_chip intc_dev = {
+ .name = "Xilinx INTC",
+ .irq_unmask = intc_enable_or_unmask,
+ .irq_mask = intc_disable_or_mask,
+ .irq_ack = intc_ack,
+ .irq_mask_ack = intc_mask_ack,
+};
+
+static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ struct xintc_irq_chip *irqc = d->host_data;
+
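+	/* a bit set in "xlnx,kind-of-intr" marks the corresponding input as edge-triggered */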
+ if (irqc->intr_mask & BIT(hw)) {
+ irq_set_chip_and_handler_name(irq, &intc_dev,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else {
+ irq_set_chip_and_handler_name(irq, &intc_dev,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ }
+ irq_set_chip_data(irq, irqc);
+ return 0;
+}
+
+static const struct irq_domain_ops xintc_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = xintc_map,
+};
+
+static void xil_intc_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xintc_irq_chip *irqc;
+
+ irqc = irq_data_get_irq_handler_data(&desc->irq_data);
+ chained_irq_enter(chip, desc);
+ do {
+ u32 hwirq = xintc_read(irqc, IVR);
+
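+		/* IVR reads back as all ones when no interrupt is pending */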
+ if (hwirq == -1U)
+ break;
+
+ generic_handle_domain_irq(irqc->root_domain, hwirq);
+ } while (true);
+ chained_irq_exit(chip, desc);
+}
+
+static void xil_intc_handle_irq(struct pt_regs *regs)
+{
+ u32 hwirq;
+
+ do {
+ hwirq = xintc_read(primary_intc, IVR);
+ if (unlikely(hwirq == SPURIOUS_IRQ))
+ break;
+
+ generic_handle_domain_irq(primary_intc->root_domain, hwirq);
+ } while (true);
+}
+
+static int __init xilinx_intc_of_init(struct device_node *intc,
+ struct device_node *parent)
+{
+ struct xintc_irq_chip *irqc;
+ int ret, irq;
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+ irqc->base = of_iomap(intc, 0);
+ BUG_ON(!irqc->base);
+
+ ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq);
+ if (ret < 0) {
+ pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
+ goto error;
+ }
+
+ ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
+ if (ret < 0) {
+ pr_warn("irq-xilinx: unable to read xlnx,kind-of-intr\n");
+ irqc->intr_mask = 0;
+ }
+
+ if (irqc->intr_mask >> irqc->nr_irq)
+ pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
+
+ pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
+ intc, irqc->nr_irq, irqc->intr_mask);
+
+ /*
+ * Disable all external interrupts until they are
+ * explicitly requested.
+ */
+ xintc_write(irqc, IER, 0);
+
+ /* Acknowledge any pending interrupts just in case. */
+ xintc_write(irqc, IAR, 0xffffffff);
+
+ /* Turn on the Master Enable. */
+ xintc_write(irqc, MER, MER_HIE | MER_ME);
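+	/* if the read-back does not match, assume a big-endian peripheral: switch accessors and rewrite */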
+ if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) {
+ static_branch_enable(&xintc_is_be);
+ xintc_write(irqc, MER, MER_HIE | MER_ME);
+ }
+
+ irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
+ &xintc_irq_domain_ops, irqc);
+ if (!irqc->root_domain) {
+ pr_err("irq-xilinx: Unable to create IRQ domain\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (parent) {
+ irq = irq_of_parse_and_map(intc, 0);
+ if (irq) {
+ irq_set_chained_handler_and_data(irq,
+ xil_intc_irq_handler,
+ irqc);
+ } else {
+ pr_err("irq-xilinx: interrupts property not in DT\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ } else {
+ primary_intc = irqc;
+ irq_set_default_host(primary_intc->root_domain);
+ set_handle_irq(xil_intc_handle_irq);
+ }
+
+ return 0;
+
+error:
+ iounmap(irqc->base);
+ kfree(irqc);
+	return ret;
+}
+
+IRQCHIP_DECLARE(xilinx_intc_xps, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
+IRQCHIP_DECLARE(xilinx_intc_opb, "xlnx,opb-intc-1.00.c", xilinx_intc_of_init);
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
new file mode 100644
index 000000000..8c581c985
--- /dev/null
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -0,0 +1,185 @@
+/*
+ * Xtensa MX interrupt distributor
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+
+#include <asm/mxregs.h>
+
+#define HW_IRQ_IPI_COUNT 2
+#define HW_IRQ_MX_BASE 2
+#define HW_IRQ_EXTERN_BASE 3
+
+static DEFINE_PER_CPU(unsigned int, cached_irq_mask);
+
+static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (hw < HW_IRQ_IPI_COUNT) {
+ struct irq_chip *irq_chip = d->host_data;
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_percpu_irq, "ipi");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ return 0;
+ }
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+ return xtensa_irq_map(d, irq, hw);
+}
+
+/*
+ * Device Tree IRQ specifier translation function which works with one or
+ * two cell bindings. First cell value maps directly to the hwirq number.
+ * Second cell if present specifies whether hwirq number is external (1) or
+ * internal (0).
+ */
+static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ return xtensa_irq_domain_xlate(intspec, intsize,
+ intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
+ out_hwirq, out_type);
+}
+
+static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
+ .xlate = xtensa_mx_irq_domain_xlate,
+ .map = xtensa_mx_irq_map,
+};
+
+void secondary_init_irq(void)
+{
+ __this_cpu_write(cached_irq_mask,
+ XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
+ xtensa_set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
+}
+
+static void xtensa_mx_irq_mask(struct irq_data *d)
+{
+ unsigned int mask = 1u << d->hwirq;
+
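+	/* external IRQs routed through the MX distributor are masked via MIENG; all others use the per-CPU INTENABLE register */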
+ if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+ unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
+
+ if (ext_irq >= HW_IRQ_MX_BASE) {
+ set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
+ return;
+ }
+ }
+ mask = __this_cpu_read(cached_irq_mask) & ~mask;
+ __this_cpu_write(cached_irq_mask, mask);
+ xtensa_set_sr(mask, intenable);
+}
+
+static void xtensa_mx_irq_unmask(struct irq_data *d)
+{
+ unsigned int mask = 1u << d->hwirq;
+
+ if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+ unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
+
+ if (ext_irq >= HW_IRQ_MX_BASE) {
+ set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
+ return;
+ }
+ }
+ mask |= __this_cpu_read(cached_irq_mask);
+ __this_cpu_write(cached_irq_mask, mask);
+ xtensa_set_sr(mask, intenable);
+}
+
+static void xtensa_mx_irq_enable(struct irq_data *d)
+{
+ xtensa_mx_irq_unmask(d);
+}
+
+static void xtensa_mx_irq_disable(struct irq_data *d)
+{
+ xtensa_mx_irq_mask(d);
+}
+
+static void xtensa_mx_irq_ack(struct irq_data *d)
+{
+ xtensa_set_sr(1 << d->hwirq, intclear);
+}
+
+static int xtensa_mx_irq_retrigger(struct irq_data *d)
+{
+ unsigned int mask = 1u << d->hwirq;
+
+ if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
+ return 0;
+ xtensa_set_sr(mask, intset);
+ return 1;
+}
+
+static int xtensa_mx_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *dest, bool force)
+{
+ int cpu = cpumask_any_and(dest, cpu_online_mask);
+ unsigned mask = 1u << cpu;
+
+ set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return 0;
+}
+
+static struct irq_chip xtensa_mx_irq_chip = {
+ .name = "xtensa-mx",
+ .irq_enable = xtensa_mx_irq_enable,
+ .irq_disable = xtensa_mx_irq_disable,
+ .irq_mask = xtensa_mx_irq_mask,
+ .irq_unmask = xtensa_mx_irq_unmask,
+ .irq_ack = xtensa_mx_irq_ack,
+ .irq_retrigger = xtensa_mx_irq_retrigger,
+ .irq_set_affinity = xtensa_mx_irq_set_affinity,
+};
+
+static void __init xtensa_mx_init_common(struct irq_domain *root_domain)
+{
+ unsigned int i;
+
+ irq_set_default_host(root_domain);
+ secondary_init_irq();
+
+ /* Initialize default IRQ routing to CPU 0 */
+ for (i = 0; i < XCHAL_NUM_EXTINTERRUPTS; ++i)
+ set_er(1, MIROUT(i));
+}
+
+int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
+ &xtensa_mx_irq_domain_ops,
+ &xtensa_mx_irq_chip);
+ xtensa_mx_init_common(root_domain);
+ return 0;
+}
+
+static int __init xtensa_mx_init(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
+ &xtensa_mx_irq_chip);
+ xtensa_mx_init_common(root_domain);
+ return 0;
+}
+IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
new file mode 100644
index 000000000..ab12328be
--- /dev/null
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -0,0 +1,109 @@
+/*
+ * Xtensa built-in interrupt controller
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+
+unsigned int cached_irq_mask;
+
+/*
+ * Device Tree IRQ specifier translation function which works with one or
+ * two cell bindings. First cell value maps directly to the hwirq number.
+ * Second cell if present specifies whether hwirq number is external (1) or
+ * internal (0).
+ */
+static int xtensa_pic_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ return xtensa_irq_domain_xlate(intspec, intsize,
+ intspec[0], intspec[0],
+ out_hwirq, out_type);
+}
+
+static const struct irq_domain_ops xtensa_irq_domain_ops = {
+ .xlate = xtensa_pic_irq_domain_xlate,
+ .map = xtensa_irq_map,
+};
+
+static void xtensa_irq_mask(struct irq_data *d)
+{
+ cached_irq_mask &= ~(1 << d->hwirq);
+ xtensa_set_sr(cached_irq_mask, intenable);
+}
+
+static void xtensa_irq_unmask(struct irq_data *d)
+{
+ cached_irq_mask |= 1 << d->hwirq;
+ xtensa_set_sr(cached_irq_mask, intenable);
+}
+
+static void xtensa_irq_enable(struct irq_data *d)
+{
+ xtensa_irq_unmask(d);
+}
+
+static void xtensa_irq_disable(struct irq_data *d)
+{
+ xtensa_irq_mask(d);
+}
+
+static void xtensa_irq_ack(struct irq_data *d)
+{
+ xtensa_set_sr(1 << d->hwirq, intclear);
+}
+
+static int xtensa_irq_retrigger(struct irq_data *d)
+{
+ unsigned int mask = 1u << d->hwirq;
+
+ if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
+ return 0;
+ xtensa_set_sr(mask, intset);
+ return 1;
+}
+
+static struct irq_chip xtensa_irq_chip = {
+ .name = "xtensa",
+ .irq_enable = xtensa_irq_enable,
+ .irq_disable = xtensa_irq_disable,
+ .irq_mask = xtensa_irq_mask,
+ .irq_unmask = xtensa_irq_unmask,
+ .irq_ack = xtensa_irq_ack,
+ .irq_retrigger = xtensa_irq_retrigger,
+};
+
+int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
+ &xtensa_irq_domain_ops, &xtensa_irq_chip);
+ irq_set_default_host(root_domain);
+ return 0;
+}
+
+static int __init xtensa_pic_init(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops,
+ &xtensa_irq_chip);
+ irq_set_default_host(root_domain);
+ return 0;
+}
+IRQCHIP_DECLARE(xtensa_irq_chip, "cdns,xtensa-pic", xtensa_pic_init);
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
new file mode 100644
index 000000000..7a72620fc
--- /dev/null
+++ b/drivers/irqchip/irq-zevio.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/irqchip/irq-zevio.c
+ *
+ * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+
+#define IO_STATUS 0x000
+#define IO_RAW_STATUS 0x004
+#define IO_ENABLE 0x008
+#define IO_DISABLE 0x00C
+#define IO_CURRENT 0x020
+#define IO_RESET 0x028
+#define IO_MAX_PRIORITY		0x02C
+
+#define IO_IRQ_BASE 0x000
+#define IO_FIQ_BASE 0x100
+
+#define IO_INVERT_SEL 0x200
+#define IO_STICKY_SEL 0x204
+#define IO_PRIORITY_SEL 0x300
+
+#define MAX_INTRS 32
+#define FIQ_START MAX_INTRS
+
+static struct irq_domain *zevio_irq_domain;
+static void __iomem *zevio_irq_io;
+
+static void zevio_irq_ack(struct irq_data *irqd)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
+ struct irq_chip_regs *regs = &irq_data_get_chip_type(irqd)->regs;
+
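+	/* reading the reset register acknowledges the currently pending interrupt */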
+ readl(gc->reg_base + regs->ack);
+}
+
+static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+{
+ int irqnr;
+
+ while (readl(zevio_irq_io + IO_STATUS)) {
+ irqnr = readl(zevio_irq_io + IO_CURRENT);
+ generic_handle_domain_irq(zevio_irq_domain, irqnr);
+ }
+}
+
+static void __init zevio_init_irq_base(void __iomem *base)
+{
+ /* Disable all interrupts */
+ writel(~0, base + IO_DISABLE);
+
+ /* Accept interrupts of all priorities */
+	writel(0xF, base + IO_MAX_PRIORITY);
+
+ /* Reset existing interrupts */
+ readl(base + IO_RESET);
+}
+
+static int __init zevio_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct irq_chip_generic *gc;
+ int ret;
+
+ if (WARN_ON(zevio_irq_io || zevio_irq_domain))
+ return -EBUSY;
+
+ zevio_irq_io = of_iomap(node, 0);
+ BUG_ON(!zevio_irq_io);
+
+ /* Do not invert interrupt status bits */
+ writel(~0, zevio_irq_io + IO_INVERT_SEL);
+
+ /* Disable sticky interrupts */
+ writel(0, zevio_irq_io + IO_STICKY_SEL);
+
+ /* We don't use IRQ priorities. Set each IRQ to highest priority. */
+ memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
+
+ /* Init IRQ and FIQ */
+ zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
+ zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
+
+ zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
+ &irq_generic_chip_ops, NULL);
+ BUG_ON(!zevio_irq_domain);
+
+ ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
+ "zevio_intc", handle_level_irq,
+ clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ BUG_ON(ret);
+
+ gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
+ gc->reg_base = zevio_irq_io;
+ gc->chip_types[0].chip.irq_ack = zevio_irq_ack;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE;
+ gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET;
+
+ set_handle_irq(zevio_handle_irq);
+
+ pr_info("TI-NSPIRE classic IRQ controller\n");
+ return 0;
+}
+
+IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
new file mode 100644
index 000000000..7899607fb
--- /dev/null
+++ b/drivers/irqchip/irqchip.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2012 Thomas Petazzoni
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/platform_device.h>
+
+/*
+ * This special of_device_id is the sentinel at the end of the
+ * of_device_id[] array of all irqchips. It is automatically placed at
+ * the end of the array by the linker, thanks to being part of a
+ * special section.
+ */
+static const struct of_device_id
+irqchip_of_match_end __used __section("__irqchip_of_table_end");
+
+extern struct of_device_id __irqchip_of_table[];
+
+void __init irqchip_init(void)
+{
+ of_irq_init(__irqchip_of_table);
+ acpi_probe_device_table(irqchip);
+}
+
+int platform_irqchip_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *par_np = of_irq_find_parent(np);
+ of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
+
+ if (!irq_init_cb) {
+ of_node_put(par_np);
+ return -EINVAL;
+ }
+
+ if (par_np == np)
+ par_np = NULL;
+
+ /*
+ * If there's a parent interrupt controller and none of the parent irq
+ * domains have been registered, that means the parent interrupt
+	 * controller has not been initialized yet, so it is not yet time for
+	 * this interrupt controller to initialize. Defer the probe of this
+	 * interrupt controller instead. The actual initialization callback of this
+ * interrupt controller can check for specific domains as necessary.
+ */
+ if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
+ of_node_put(par_np);
+ return -EPROBE_DEFER;
+ }
+
+ return irq_init_cb(np, par_np);
+}
+EXPORT_SYMBOL_GPL(platform_irqchip_probe);
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
new file mode 100644
index 000000000..18e696dc7
--- /dev/null
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Driver for interrupt combiners in the Top-level Control and Status
+ * Registers (TCSR) hardware block in Qualcomm Technologies chips.
+ * An interrupt combiner in this block combines a set of interrupts by
+ * OR'ing the individual interrupt signals into a summary interrupt
+ * signal routed to a parent interrupt controller, and provides read-
+ * only, 32-bit registers to query the status of individual interrupts.
+ * The status bit for IRQ n is bit (n % 32) within register (n / 32)
+ * of the given combiner. Thus, each combiner can be described as a set
+ * of register offsets and the number of IRQs managed.
+ */
+
+#define pr_fmt(fmt) "QCOM80B1:" fmt
+
+#include <linux/acpi.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+
+#define REG_SIZE 32
+
+struct combiner_reg {
+ void __iomem *addr;
+ unsigned long enabled;
+};
+
+struct combiner {
+ struct irq_domain *domain;
+ int parent_irq;
+ u32 nirqs;
+ u32 nregs;
+ struct combiner_reg regs[];
+};
+
+static inline int irq_nr(u32 reg, u32 bit)
+{
+ return reg * REG_SIZE + bit;
+}
+
+/*
+ * Handler for the cascaded IRQ.
+ */
+static void combiner_handle_irq(struct irq_desc *desc)
+{
+ struct combiner *combiner = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 reg;
+
+ chained_irq_enter(chip, desc);
+
+ for (reg = 0; reg < combiner->nregs; reg++) {
+ int hwirq;
+ u32 bit;
+ u32 status;
+
+ bit = readl_relaxed(combiner->regs[reg].addr);
+ status = bit & combiner->regs[reg].enabled;
+ if (bit && !status)
+ pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n",
+ smp_processor_id(), bit,
+ combiner->regs[reg].enabled,
+ combiner->regs[reg].addr);
+
+ while (status) {
+ bit = __ffs(status);
+ status &= ~(1 << bit);
+ hwirq = irq_nr(reg, bit);
+ generic_handle_domain_irq(combiner->domain, hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void combiner_irq_chip_mask_irq(struct irq_data *data)
+{
+ struct combiner *combiner = irq_data_get_irq_chip_data(data);
+ struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;
+
+ clear_bit(data->hwirq % REG_SIZE, &reg->enabled);
+}
+
+static void combiner_irq_chip_unmask_irq(struct irq_data *data)
+{
+ struct combiner *combiner = irq_data_get_irq_chip_data(data);
+ struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;
+
+ set_bit(data->hwirq % REG_SIZE, &reg->enabled);
+}
+
+static struct irq_chip irq_chip = {
+ .irq_mask = combiner_irq_chip_mask_irq,
+ .irq_unmask = combiner_irq_chip_unmask_irq,
+ .name = "qcom-irq-combiner"
+};
+
+static int combiner_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_noprobe(irq);
+ return 0;
+}
+
+static void combiner_irq_unmap(struct irq_domain *domain, unsigned int irq)
+{
+ irq_domain_reset_irq_data(irq_get_irq_data(irq));
+}
+
+static int combiner_irq_translate(struct irq_domain *d, struct irq_fwspec *fws,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct combiner *combiner = d->host_data;
+
+ if (is_acpi_node(fws->fwnode)) {
+ if (WARN_ON((fws->param_count != 2) ||
+ (fws->param[0] >= combiner->nirqs) ||
+ (fws->param[1] & IORESOURCE_IRQ_LOWEDGE) ||
+ (fws->param[1] & IORESOURCE_IRQ_HIGHEDGE)))
+ return -EINVAL;
+
+ *hwirq = fws->param[0];
+ *type = fws->param[1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct irq_domain_ops domain_ops = {
+ .map = combiner_irq_map,
+ .unmap = combiner_irq_unmap,
+ .translate = combiner_irq_translate
+};
+
+static acpi_status count_registers_cb(struct acpi_resource *ares, void *context)
+{
+ int *count = context;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+ ++(*count);
+ return AE_OK;
+}
+
+static int count_registers(struct platform_device *pdev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
+ acpi_status status;
+ int count = 0;
+
+ if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
+ return -EINVAL;
+
+ status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
+ count_registers_cb, &count);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ return count;
+}
+
+struct get_registers_context {
+ struct device *dev;
+ struct combiner *combiner;
+ int err;
+};
+
+static acpi_status get_registers_cb(struct acpi_resource *ares, void *context)
+{
+ struct get_registers_context *ctx = context;
+ struct acpi_resource_generic_register *reg;
+ phys_addr_t paddr;
+ void __iomem *vaddr;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+ return AE_OK;
+
+ reg = &ares->data.generic_reg;
+ paddr = reg->address;
+ if ((reg->space_id != ACPI_SPACE_MEM) ||
+ (reg->bit_offset != 0) ||
+ (reg->bit_width > REG_SIZE)) {
+ dev_err(ctx->dev, "Bad register resource @%pa\n", &paddr);
+ ctx->err = -EINVAL;
+ return AE_ERROR;
+ }
+
+ vaddr = devm_ioremap(ctx->dev, reg->address, REG_SIZE);
+ if (!vaddr) {
+ dev_err(ctx->dev, "Can't map register @%pa\n", &paddr);
+ ctx->err = -ENOMEM;
+ return AE_ERROR;
+ }
+
+ ctx->combiner->regs[ctx->combiner->nregs].addr = vaddr;
+ ctx->combiner->nirqs += reg->bit_width;
+ ctx->combiner->nregs++;
+ return AE_OK;
+}
+
+static int get_registers(struct platform_device *pdev, struct combiner *comb)
+{
+ acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
+ acpi_status status;
+ struct get_registers_context ctx;
+
+ if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
+ return -EINVAL;
+
+ ctx.dev = &pdev->dev;
+ ctx.combiner = comb;
+ ctx.err = 0;
+
+ status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
+ get_registers_cb, &ctx);
+ if (ACPI_FAILURE(status))
+ return ctx.err;
+ return 0;
+}
+
+static int __init combiner_probe(struct platform_device *pdev)
+{
+ struct combiner *combiner;
+ int nregs;
+ int err;
+
+ nregs = count_registers(pdev);
+ if (nregs <= 0) {
+ dev_err(&pdev->dev, "Error reading register resources\n");
+ return -EINVAL;
+ }
+
+ combiner = devm_kzalloc(&pdev->dev, struct_size(combiner, regs, nregs),
+ GFP_KERNEL);
+ if (!combiner)
+ return -ENOMEM;
+
+ err = get_registers(pdev, combiner);
+ if (err < 0)
+ return err;
+
+ combiner->parent_irq = platform_get_irq(pdev, 0);
+ if (combiner->parent_irq <= 0)
+ return -EPROBE_DEFER;
+
+ combiner->domain = irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs,
+ &domain_ops, combiner);
+ if (!combiner->domain)
+ /* Errors printed by irq_domain_create_linear */
+ return -ENODEV;
+
+ irq_set_chained_handler_and_data(combiner->parent_irq,
+ combiner_handle_irq, combiner);
+
+ dev_info(&pdev->dev, "Initialized with [p=%d,n=%d,r=%p]\n",
+ combiner->parent_irq, combiner->nirqs, combiner->regs[0].addr);
+ return 0;
+}
+
+static const struct acpi_device_id qcom_irq_combiner_ids[] = {
+ { "QCOM80B1", },
+ { }
+};
+
+static struct platform_driver qcom_irq_combiner_probe = {
+ .driver = {
+ .name = "qcom-irq-combiner",
+ .acpi_match_table = ACPI_PTR(qcom_irq_combiner_ids),
+ },
+ .probe = combiner_probe,
+};
+builtin_platform_driver(qcom_irq_combiner_probe);
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
new file mode 100644
index 000000000..d96916cf6
--- /dev/null
+++ b/drivers/irqchip/qcom-pdc.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/soc/qcom/irq.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define PDC_MAX_GPIO_IRQS 256
+
+#define IRQ_ENABLE_BANK 0x10
+#define IRQ_i_CFG 0x110
+
+struct pdc_pin_region {
+ u32 pin_base;
+ u32 parent_base;
+ u32 cnt;
+};
+
+#define pin_to_hwirq(r, p) ((r)->parent_base + (p) - (r)->pin_base)
+
+static DEFINE_RAW_SPINLOCK(pdc_lock);
+static void __iomem *pdc_base;
+static struct pdc_pin_region *pdc_region;
+static int pdc_region_cnt;
+
+static void pdc_reg_write(int reg, u32 i, u32 val)
+{
+ writel_relaxed(val, pdc_base + reg + i * sizeof(u32));
+}
+
+static u32 pdc_reg_read(int reg, u32 i)
+{
+ return readl_relaxed(pdc_base + reg + i * sizeof(u32));
+}
+
+static void pdc_enable_intr(struct irq_data *d, bool on)
+{
+ int pin_out = d->hwirq;
+ unsigned long enable;
+ unsigned long flags;
+ u32 index, mask;
+
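+	/* each 32-bit IRQ_ENABLE_BANK register covers 32 PDC pins */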
+ index = pin_out / 32;
+ mask = pin_out % 32;
+
+ raw_spin_lock_irqsave(&pdc_lock, flags);
+ enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
+ __assign_bit(mask, &enable, on);
+ pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+ raw_spin_unlock_irqrestore(&pdc_lock, flags);
+}
+
+static void qcom_pdc_gic_disable(struct irq_data *d)
+{
+ pdc_enable_intr(d, false);
+ irq_chip_disable_parent(d);
+}
+
+static void qcom_pdc_gic_enable(struct irq_data *d)
+{
+ pdc_enable_intr(d, true);
+ irq_chip_enable_parent(d);
+}
+
+/*
+ * GIC does not handle falling edge or active low. To allow falling edge and
+ * active low interrupts to be handled at GIC, PDC has an inverter that inverts
+ * falling edge into a rising edge and active low into an active high.
+ * For the inverter to work, the polarity bit in the IRQ_CONFIG register has to
+ * be set as per the table below.
+ * Level sensitive active low LOW
+ * Rising edge sensitive NOT USED
+ * Falling edge sensitive LOW
+ * Dual Edge sensitive NOT USED
+ * Level sensitive active High HIGH
+ * Falling Edge sensitive NOT USED
+ * Rising edge sensitive HIGH
+ * Dual Edge sensitive HIGH
+ */
+enum pdc_irq_config_bits {
+ PDC_LEVEL_LOW = 0b000,
+ PDC_EDGE_FALLING = 0b010,
+ PDC_LEVEL_HIGH = 0b100,
+ PDC_EDGE_RISING = 0b110,
+ PDC_EDGE_DUAL = 0b111,
+};
+
+/**
+ * qcom_pdc_gic_set_type: Configure PDC for the interrupt
+ *
+ * @d: the interrupt data
+ * @type: the interrupt type
+ *
+ * If @type is edge triggered, forward it as rising edge, as the PDC
+ * takes care of converting a falling edge into a rising edge signal.
+ * If @type is level, forward it as level high, as the PDC takes care
+ * of converting an active-low level into an active-high signal.
+ */
+static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
+{
+ enum pdc_irq_config_bits pdc_type;
+ enum pdc_irq_config_bits old_pdc_type;
+ int ret;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ pdc_type = PDC_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ pdc_type = PDC_EDGE_FALLING;
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ pdc_type = PDC_EDGE_DUAL;
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ pdc_type = PDC_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ pdc_type = PDC_LEVEL_LOW;
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ old_pdc_type = pdc_reg_read(IRQ_i_CFG, d->hwirq);
+ pdc_reg_write(IRQ_i_CFG, d->hwirq, pdc_type);
+
+ ret = irq_chip_set_type_parent(d, type);
+ if (ret)
+ return ret;
+
+ /*
+	 * When we change types, the PDC can raise a phantom interrupt;
+	 * clear it. The phantom specifically shows up when the polarity of
+	 * an interrupt is reconfigured without the signal changing state,
+	 * but let's be consistent and clear it always.
+ *
+ * Doing this works because we have IRQCHIP_SET_TYPE_MASKED so the
+ * interrupt will be cleared before the rest of the system sees it.
+ */
+ if (old_pdc_type != pdc_type)
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
+
+ return 0;
+}
+
+static struct irq_chip qcom_pdc_gic_chip = {
+ .name = "PDC",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_disable = qcom_pdc_gic_disable,
+ .irq_enable = qcom_pdc_gic_enable,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = qcom_pdc_gic_set_type,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND,
+ .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static struct pdc_pin_region *get_pin_region(int pin)
+{
+ int i;
+
+ for (i = 0; i < pdc_region_cnt; i++) {
+ if (pin >= pdc_region[i].pin_base &&
+ pin < pdc_region[i].pin_base + pdc_region[i].cnt)
+ return &pdc_region[i];
+ }
+
+ return NULL;
+}
+
+static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct pdc_pin_region *region;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
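+	/* non-wakeup GPIOs have no PDC pin; leave them disconnected at this level */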
+ if (hwirq == GPIO_NO_WAKE_IRQ)
+ return irq_domain_disconnect_hierarchy(domain, virq);
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &qcom_pdc_gic_chip, NULL);
+ if (ret)
+ return ret;
+
+ region = get_pin_region(hwirq);
+ if (!region)
+ return irq_domain_disconnect_hierarchy(domain->parent, virq);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = pin_to_hwirq(region, hwirq);
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static const struct irq_domain_ops qcom_pdc_ops = {
+ .translate = irq_domain_translate_twocell,
+ .alloc = qcom_pdc_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
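+/*
+ * The "qcom,pdc-ranges" property parsed below is a list of
+ * <pin_base parent_base cnt> triplets. An illustrative (not SoC-specific)
+ * encoding:
+ *
+ *	qcom,pdc-ranges = <0 480 94>, <94 609 31>;
+ *
+ * would map PDC pins 0..93 to parent hwirqs 480..573 and pins 94..124
+ * to parent hwirqs 609..639.
+ */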
+static int pdc_setup_pin_mapping(struct device_node *np)
+{
+ int ret, n, i;
+ u32 irq_index, reg_index, val;
+
+ n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32));
+ if (n <= 0 || n % 3)
+ return -EINVAL;
+
+ pdc_region_cnt = n / 3;
+ pdc_region = kcalloc(pdc_region_cnt, sizeof(*pdc_region), GFP_KERNEL);
+ if (!pdc_region) {
+ pdc_region_cnt = 0;
+ return -ENOMEM;
+ }
+
+ for (n = 0; n < pdc_region_cnt; n++) {
+ ret = of_property_read_u32_index(np, "qcom,pdc-ranges",
+ n * 3 + 0,
+ &pdc_region[n].pin_base);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32_index(np, "qcom,pdc-ranges",
+ n * 3 + 1,
+ &pdc_region[n].parent_base);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32_index(np, "qcom,pdc-ranges",
+ n * 3 + 2,
+ &pdc_region[n].cnt);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pdc_region[n].cnt; i++) {
+ reg_index = (i + pdc_region[n].pin_base) >> 5;
+ irq_index = (i + pdc_region[n].pin_base) & 0x1f;
+ val = pdc_reg_read(IRQ_ENABLE_BANK, reg_index);
+ val &= ~BIT(irq_index);
+ pdc_reg_write(IRQ_ENABLE_BANK, reg_index, val);
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *pdc_domain;
+ int ret;
+
+ pdc_base = of_iomap(node, 0);
+ if (!pdc_base) {
+ pr_err("%pOF: unable to map PDC registers\n", node);
+ return -ENXIO;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to find PDC's parent domain\n", node);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ ret = pdc_setup_pin_mapping(node);
+ if (ret) {
+ pr_err("%pOF: failed to init PDC pin-hwirq mapping\n", node);
+ goto fail;
+ }
+
+ pdc_domain = irq_domain_create_hierarchy(parent_domain,
+ IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
+ PDC_MAX_GPIO_IRQS,
+ of_fwnode_handle(node),
+ &qcom_pdc_ops, NULL);
+ if (!pdc_domain) {
+ pr_err("%pOF: PDC domain add failed\n", node);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ irq_domain_update_bus_token(pdc_domain, DOMAIN_BUS_WAKEUP);
+
+ return 0;
+
+fail:
+ kfree(pdc_region);
+ iounmap(pdc_base);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_pdc)
+IRQCHIP_MATCH("qcom,pdc", qcom_pdc_init)
+IRQCHIP_PLATFORM_DRIVER_END(qcom_pdc)
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Domain Controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
new file mode 100644
index 000000000..7c17a6f64
--- /dev/null
+++ b/drivers/irqchip/spear-shirq.c
@@ -0,0 +1,290 @@
+/*
+ * SPEAr platform shared irq layer source file
+ *
+ * Copyright (C) 2009-2012 ST Microelectronics
+ * Viresh Kumar <vireshk@kernel.org>
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+/*
+ * struct spear_shirq: shared irq structure
+ *
+ * base: Base register address
+ * status_reg: Status register offset for chained interrupt handler
+ * mask_reg: Mask register offset for irq chip
+ * mask: Mask to apply to the status register
+ * virq_base: Base virtual interrupt number
+ * nr_irqs: Number of interrupts handled by this block
+ * offset: Bit offset of the first interrupt
+ * irq_chip: Interrupt controller chip used for this instance;
+ * if NULL, the group is disabled but its interrupts are still accounted for
+ */
+struct spear_shirq {
+ void __iomem *base;
+ u32 status_reg;
+ u32 mask_reg;
+ u32 mask;
+ u32 virq_base;
+ u32 nr_irqs;
+ u32 offset;
+ struct irq_chip *irq_chip;
+};
+
+/* spear300 shared irq registers offsets and masks */
+#define SPEAR300_INT_ENB_MASK_REG 0x54
+#define SPEAR300_INT_STS_MASK_REG 0x58
+
+static DEFINE_RAW_SPINLOCK(shirq_lock);
+
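+/*
+ * The enable register is shared by every interrupt in a block, so
+ * mask/unmask are read-modify-write sequences under a common lock.
+ */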
+static void shirq_irq_mask(struct irq_data *d)
+{
+ struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+ u32 val, shift = d->irq - shirq->virq_base + shirq->offset;
+ u32 __iomem *reg = shirq->base + shirq->mask_reg;
+
+ raw_spin_lock(&shirq_lock);
+ val = readl(reg) & ~(0x1 << shift);
+ writel(val, reg);
+ raw_spin_unlock(&shirq_lock);
+}
+
+static void shirq_irq_unmask(struct irq_data *d)
+{
+ struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+ u32 val, shift = d->irq - shirq->virq_base + shirq->offset;
+ u32 __iomem *reg = shirq->base + shirq->mask_reg;
+
+ raw_spin_lock(&shirq_lock);
+ val = readl(reg) | (0x1 << shift);
+ writel(val, reg);
+ raw_spin_unlock(&shirq_lock);
+}
+
+static struct irq_chip shirq_chip = {
+ .name = "spear-shirq",
+ .irq_mask = shirq_irq_mask,
+ .irq_unmask = shirq_irq_unmask,
+};
+
+static struct spear_shirq spear300_shirq_ras1 = {
+ .offset = 0,
+ .nr_irqs = 9,
+ .mask = ((0x1 << 9) - 1) << 0,
+ .irq_chip = &shirq_chip,
+ .status_reg = SPEAR300_INT_STS_MASK_REG,
+ .mask_reg = SPEAR300_INT_ENB_MASK_REG,
+};
+
+static struct spear_shirq *spear300_shirq_blocks[] = {
+ &spear300_shirq_ras1,
+};
+
+/* spear310 shared irq registers offsets and masks */
+#define SPEAR310_INT_STS_MASK_REG 0x04
+
+static struct spear_shirq spear310_shirq_ras1 = {
+ .offset = 0,
+ .nr_irqs = 8,
+ .mask = ((0x1 << 8) - 1) << 0,
+ .irq_chip = &dummy_irq_chip,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear310_shirq_ras2 = {
+ .offset = 8,
+ .nr_irqs = 5,
+ .mask = ((0x1 << 5) - 1) << 8,
+ .irq_chip = &dummy_irq_chip,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear310_shirq_ras3 = {
+ .offset = 13,
+ .nr_irqs = 1,
+ .mask = ((0x1 << 1) - 1) << 13,
+ .irq_chip = &dummy_irq_chip,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear310_shirq_intrcomm_ras = {
+ .offset = 14,
+ .nr_irqs = 3,
+ .mask = ((0x1 << 3) - 1) << 14,
+ .irq_chip = &dummy_irq_chip,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq *spear310_shirq_blocks[] = {
+ &spear310_shirq_ras1,
+ &spear310_shirq_ras2,
+ &spear310_shirq_ras3,
+ &spear310_shirq_intrcomm_ras,
+};
+
+/* spear320 shared irq registers offsets and masks */
+#define SPEAR320_INT_STS_MASK_REG 0x04
+#define SPEAR320_INT_CLR_MASK_REG 0x04
+#define SPEAR320_INT_ENB_MASK_REG 0x08
+
+static struct spear_shirq spear320_shirq_ras3 = {
+ .offset = 0,
+ .nr_irqs = 7,
+ .mask = ((0x1 << 7) - 1) << 0,
+ .irq_chip = &dummy_irq_chip,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear320_shirq_ras1 = {
+ .offset = 7,
+ .nr_irqs = 3,
+ .mask = ((0x1 << 3) - 1) << 7,
+ .irq_chip = &dummy_irq_chip,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear320_shirq_ras2 = {
+ .offset = 10,
+ .nr_irqs = 1,
+ .mask = ((0x1 << 1) - 1) << 10,
+ .irq_chip = &dummy_irq_chip,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq spear320_shirq_intrcomm_ras = {
+ .offset = 11,
+ .nr_irqs = 11,
+ .mask = ((0x1 << 11) - 1) << 11,
+ .irq_chip = &dummy_irq_chip,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+};
+
+static struct spear_shirq *spear320_shirq_blocks[] = {
+ &spear320_shirq_ras3,
+ &spear320_shirq_ras1,
+ &spear320_shirq_ras2,
+ &spear320_shirq_intrcomm_ras,
+};
+
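+/*
+ * Chained handler: read the shared status register, keep only this
+ * block's bits and dispatch each pending child interrupt.
+ */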
+static void shirq_handler(struct irq_desc *desc)
+{
+ struct spear_shirq *shirq = irq_desc_get_handler_data(desc);
+ u32 pend;
+
+ pend = readl(shirq->base + shirq->status_reg) & shirq->mask;
+ pend >>= shirq->offset;
+
+ while (pend) {
+ int irq = __ffs(pend);
+
+ pend &= ~(0x1 << irq);
+ generic_handle_irq(shirq->virq_base + irq);
+ }
+}
+
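+/*
+ * Install the chained handler on the parent interrupt and wire up each
+ * child; blocks without an irq_chip are skipped (disabled but accounted).
+ */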
+static void __init spear_shirq_register(struct spear_shirq *shirq,
+ int parent_irq)
+{
+ int i;
+
+ if (!shirq->irq_chip)
+ return;
+
+ irq_set_chained_handler_and_data(parent_irq, shirq_handler, shirq);
+
+ for (i = 0; i < shirq->nr_irqs; i++) {
+ irq_set_chip_and_handler(shirq->virq_base + i,
+ shirq->irq_chip, handle_simple_irq);
+ irq_set_chip_data(shirq->virq_base + i, shirq);
+ }
+}
+
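+/*
+ * Common init: map the shared registers, allocate descriptors for all
+ * blocks, add a legacy domain and register each block with its parent irq.
+ */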
+static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
+ struct device_node *np)
+{
+ int i, parent_irq, virq_base, hwirq = 0, nr_irqs = 0;
+ struct irq_domain *shirq_domain;
+ void __iomem *base;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: failed to map shirq registers\n", __func__);
+ return -ENXIO;
+ }
+
+ for (i = 0; i < block_nr; i++)
+ nr_irqs += shirq_blocks[i]->nr_irqs;
+
+ virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
+ if (virq_base < 0) {
+ pr_err("%s: irq desc alloc failed\n", __func__);
+ goto err_unmap;
+ }
+
+ shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ if (WARN_ON(!shirq_domain)) {
+ pr_warn("%s: irq domain init failed\n", __func__);
+ goto err_free_desc;
+ }
+
+ for (i = 0; i < block_nr; i++) {
+ shirq_blocks[i]->base = base;
+ shirq_blocks[i]->virq_base = irq_find_mapping(shirq_domain,
+ hwirq);
+
+ parent_irq = irq_of_parse_and_map(np, i);
+ spear_shirq_register(shirq_blocks[i], parent_irq);
+ hwirq += shirq_blocks[i]->nr_irqs;
+ }
+
+ return 0;
+
+err_free_desc:
+ irq_free_descs(virq_base, nr_irqs);
+err_unmap:
+ iounmap(base);
+ return -ENXIO;
+}
+
+static int __init spear300_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear300_shirq_blocks,
+ ARRAY_SIZE(spear300_shirq_blocks), np);
+}
+IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);
+
+static int __init spear310_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear310_shirq_blocks,
+ ARRAY_SIZE(spear310_shirq_blocks), np);
+}
+IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);
+
+static int __init spear320_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear320_shirq_blocks,
+ ARRAY_SIZE(spear320_shirq_blocks), np);
+}
+IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init);