author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit:    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree:      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/fpga
parent:    Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/fpga')
-rw-r--r--  drivers/fpga/Kconfig                     |  281
-rw-r--r--  drivers/fpga/Makefile                    |   60
-rw-r--r--  drivers/fpga/altera-cvp.c                |  718
-rw-r--r--  drivers/fpga/altera-fpga2sdram.c         |  174
-rw-r--r--  drivers/fpga/altera-freeze-bridge.c      |  278
-rw-r--r--  drivers/fpga/altera-hps2fpga.c           |  227
-rw-r--r--  drivers/fpga/altera-pr-ip-core-plat.c    |   46
-rw-r--r--  drivers/fpga/altera-pr-ip-core.c         |  201
-rw-r--r--  drivers/fpga/altera-ps-spi.c             |  332
-rw-r--r--  drivers/fpga/dfl-afu-dma-region.c        |  405
-rw-r--r--  drivers/fpga/dfl-afu-error.c             |  249
-rw-r--r--  drivers/fpga/dfl-afu-main.c              |  988
-rw-r--r--  drivers/fpga/dfl-afu-region.c            |  167
-rw-r--r--  drivers/fpga/dfl-afu.h                   |  109
-rw-r--r--  drivers/fpga/dfl-fme-br.c                |  109
-rw-r--r--  drivers/fpga/dfl-fme-error.c             |  377
-rw-r--r--  drivers/fpga/dfl-fme-main.c              |  762
-rw-r--r--  drivers/fpga/dfl-fme-mgr.c               |  321
-rw-r--r--  drivers/fpga/dfl-fme-perf.c              | 1022
-rw-r--r--  drivers/fpga/dfl-fme-pr.c                |  478
-rw-r--r--  drivers/fpga/dfl-fme-pr.h                |   84
-rw-r--r--  drivers/fpga/dfl-fme-region.c            |   88
-rw-r--r--  drivers/fpga/dfl-fme.h                   |   44
-rw-r--r--  drivers/fpga/dfl-n3000-nios.c            |  588
-rw-r--r--  drivers/fpga/dfl-pci.c                   |  451
-rw-r--r--  drivers/fpga/dfl.c                       | 2037
-rw-r--r--  drivers/fpga/dfl.h                       |  565
-rw-r--r--  drivers/fpga/fpga-bridge.c               |  441
-rw-r--r--  drivers/fpga/fpga-mgr.c                  |  994
-rw-r--r--  drivers/fpga/fpga-region.c               |  318
-rw-r--r--  drivers/fpga/ice40-spi.c                 |  211
-rw-r--r--  drivers/fpga/intel-m10-bmc-sec-update.c  |  775
-rw-r--r--  drivers/fpga/lattice-sysconfig-spi.c     |  153
-rw-r--r--  drivers/fpga/lattice-sysconfig.c         |  397
-rw-r--r--  drivers/fpga/lattice-sysconfig.h         |   39
-rw-r--r--  drivers/fpga/machxo2-spi.c               |  405
-rw-r--r--  drivers/fpga/microchip-spi.c             |  412
-rw-r--r--  drivers/fpga/of-fpga-region.c            |  484
-rw-r--r--  drivers/fpga/socfpga-a10.c               |  551
-rw-r--r--  drivers/fpga/socfpga.c                   |  597
-rw-r--r--  drivers/fpga/stratix10-soc.c             |  503
-rw-r--r--  drivers/fpga/tests/.kunitconfig          |    5
-rw-r--r--  drivers/fpga/tests/Kconfig               |   11
-rw-r--r--  drivers/fpga/tests/Makefile              |    6
-rw-r--r--  drivers/fpga/tests/fpga-bridge-test.c    |  175
-rw-r--r--  drivers/fpga/tests/fpga-mgr-test.c       |  327
-rw-r--r--  drivers/fpga/tests/fpga-region-test.c    |  213
-rw-r--r--  drivers/fpga/ts73xx-fpga.c               |  132
-rw-r--r--  drivers/fpga/versal-fpga.c               |   80
-rw-r--r--  drivers/fpga/xilinx-pr-decoupler.c       |  186
-rw-r--r--  drivers/fpga/xilinx-spi.c                |  276
-rw-r--r--  drivers/fpga/zynq-fpga.c                 |  659
-rw-r--r--  drivers/fpga/zynqmp-fpga.c               |  144
53 files changed, 19655 insertions, 0 deletions
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
new file mode 100644
index 0000000000..2f689ac4ba
--- /dev/null
+++ b/drivers/fpga/Kconfig
@@ -0,0 +1,281 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# FPGA framework configuration
+#
+
+menuconfig FPGA
+ tristate "FPGA Configuration Framework"
+ help
+ Say Y here if you want support for configuring FPGAs from the
+ kernel. The FPGA framework adds an FPGA manager class and FPGA
+ manager drivers.
+
+if FPGA
+
+config FPGA_MGR_SOCFPGA
+ tristate "Altera SOCFPGA FPGA Manager"
+ depends on ARCH_INTEL_SOCFPGA || COMPILE_TEST
+ help
+ FPGA manager driver support for Altera SOCFPGA.
+
+config FPGA_MGR_SOCFPGA_A10
+ tristate "Altera SoCFPGA Arria10"
+ depends on ARCH_INTEL_SOCFPGA || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ FPGA manager driver support for Altera Arria10 SoCFPGA.
+
+config ALTERA_PR_IP_CORE
+ tristate "Altera Partial Reconfiguration IP Core"
+ help
+ Core driver support for the Altera Partial Reconfiguration IP component.
+
+config ALTERA_PR_IP_CORE_PLAT
+ tristate "Platform support of Altera Partial Reconfiguration IP Core"
+ depends on ALTERA_PR_IP_CORE && OF && HAS_IOMEM
+ help
+ Platform driver support for the Altera Partial Reconfiguration IP
+ component.
+
+config FPGA_MGR_ALTERA_PS_SPI
+ tristate "Altera FPGA Passive Serial over SPI"
+ depends on SPI
+ select BITREVERSE
+ help
+ FPGA manager driver support for Altera Arria/Cyclone/Stratix
+ using the passive serial interface over SPI.
+
+config FPGA_MGR_ALTERA_CVP
+ tristate "Altera CvP FPGA Manager"
+ depends on PCI
+ help
+ FPGA manager driver support for Arria V, Cyclone V, Stratix V,
+ Arria 10 and Stratix 10 Altera FPGAs using the CvP interface over PCIe.
+
+config FPGA_MGR_ZYNQ_FPGA
+ tristate "Xilinx Zynq FPGA"
+ depends on ARCH_ZYNQ || COMPILE_TEST
+ help
+ FPGA manager driver support for Xilinx Zynq FPGAs.
+
+config FPGA_MGR_STRATIX10_SOC
+ tristate "Intel Stratix10 SoC FPGA Manager"
+ depends on (ARCH_INTEL_SOCFPGA && INTEL_STRATIX10_SERVICE)
+ help
+ FPGA manager driver support for the Intel Stratix10 SoC.
+
+config FPGA_MGR_XILINX_SPI
+ tristate "Xilinx Configuration over Slave Serial (SPI)"
+ depends on SPI
+ help
+ FPGA manager driver support for Xilinx FPGA configuration
+ over slave serial interface.
+
+config FPGA_MGR_ICE40_SPI
+ tristate "Lattice iCE40 SPI"
+ depends on OF && SPI
+ help
+ FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
+
+config FPGA_MGR_MACHXO2_SPI
+ tristate "Lattice MachXO2 SPI"
+ depends on SPI
+ help
+ FPGA manager driver support for Lattice MachXO2 configuration
+ over slave SPI interface.
+
+config FPGA_MGR_TS73XX
+ tristate "Technologic Systems TS-73xx SBC FPGA Manager"
+ depends on ARCH_EP93XX && MACH_TS72XX
+ help
+ FPGA manager driver support for the Altera Cyclone II FPGA
+ present on the TS-73xx SBC boards.
+
+config FPGA_BRIDGE
+ tristate "FPGA Bridge Framework"
+ help
+ Say Y here if you want to support bridges connected between host
+ processors and FPGAs or between FPGAs.
+
+config SOCFPGA_FPGA_BRIDGE
+ tristate "Altera SoCFPGA FPGA Bridges"
+ depends on ARCH_INTEL_SOCFPGA && FPGA_BRIDGE
+ help
+ Say Y to enable drivers for FPGA bridges for Altera SoCFPGA
+ devices.
+
+config ALTERA_FREEZE_BRIDGE
+ tristate "Altera FPGA Freeze Bridge"
+ depends on FPGA_BRIDGE && HAS_IOMEM
+ help
+ Say Y to enable drivers for Altera FPGA Freeze bridges. A
+ freeze bridge is a bridge that exists in the FPGA fabric to
+ isolate one region of the FPGA from the busses while that
+ region is being reprogrammed.
+
+config XILINX_PR_DECOUPLER
+ tristate "Xilinx LogiCORE PR Decoupler"
+ depends on FPGA_BRIDGE
+ depends on HAS_IOMEM
+ help
+ Say Y to enable drivers for Xilinx LogiCORE PR Decoupler
+ or Xilinx Dynamic Function eXchange AXI Shutdown Manager.
+ The PR Decoupler exists in the FPGA fabric to isolate one
+ region of the FPGA from the busses while that region is
+ being reprogrammed during partial reconfig.
+ The Dynamic Function eXchange AXI shutdown manager prevents
+ AXI traffic from passing through the bridge. The controller
+ safely handles AXI4MM and AXI4-Lite interfaces on a
+ Reconfigurable Partition when it is undergoing dynamic
+ reconfiguration, preventing the system deadlock that can
+ occur if AXI transactions are interrupted by DFX.
+
+config FPGA_REGION
+ tristate "FPGA Region"
+ depends on FPGA_BRIDGE
+ help
+ FPGA Region common code. An FPGA Region controls an FPGA Manager
+ and the FPGA Bridges associated with either a reconfigurable
+ region of an FPGA or a whole FPGA.
+
+config OF_FPGA_REGION
+ tristate "FPGA Region Device Tree Overlay Support"
+ depends on OF && FPGA_REGION
+ help
+ Support for loading FPGA images by applying a Device Tree
+ overlay.
+
+config FPGA_DFL
+ tristate "FPGA Device Feature List (DFL) support"
+ select FPGA_BRIDGE
+ select FPGA_REGION
+ depends on HAS_IOMEM
+ help
+ Device Feature List (DFL) defines a feature list structure that
+ creates a linked list of feature headers within the MMIO space
+ to provide an extensible way of adding features for an FPGA.
+ A driver can walk through the feature headers to enumerate feature
+ devices (e.g. FPGA Management Engine, Port and Accelerator
+ Function Unit) and their private features for target FPGA devices.
+
+ Select this option to enable common support for Field-Programmable
+ Gate Array (FPGA) solutions which implement Device Feature List.
+ It provides enumeration APIs and feature device infrastructure.
+
+config FPGA_DFL_FME
+ tristate "FPGA DFL FME Driver"
+ depends on FPGA_DFL && HWMON && PERF_EVENTS
+ help
+ The FPGA Management Engine (FME) is a feature device implemented
+ under Device Feature List (DFL) framework. Select this option to
+ enable the platform device driver for FME which implements all
+ FPGA platform level management features. There shall be one FME
+ per DFL-based FPGA device.
+
+config FPGA_DFL_FME_MGR
+ tristate "FPGA DFL FME Manager Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Manager driver for FPGA Management Engine.
+
+config FPGA_DFL_FME_BRIDGE
+ tristate "FPGA DFL FME Bridge Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Bridge driver for FPGA Management Engine.
+
+config FPGA_DFL_FME_REGION
+ tristate "FPGA DFL FME Region Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Region driver for FPGA Management Engine.
+
+config FPGA_DFL_AFU
+ tristate "FPGA DFL AFU Driver"
+ depends on FPGA_DFL
+ help
+ This is the driver for FPGA Accelerated Function Unit (AFU) which
+ implements AFU and Port management features. A User AFU connects
+ to the FPGA infrastructure via a Port. There may be more than one
+ Port/AFU per DFL-based FPGA device.
+
+config FPGA_DFL_NIOS_INTEL_PAC_N3000
+ tristate "FPGA DFL NIOS Driver for Intel PAC N3000"
+ depends on FPGA_DFL
+ select REGMAP
+ help
+ This is the driver for the N3000 Nios private feature on Intel
+ PAC (Programmable Acceleration Card) N3000. It communicates
+ with the embedded Nios processor to configure the retimers on
+ the card. It also instantiates the SPI master (spi-altera) for
+ the card's BMC (Board Management Controller).
+
+config FPGA_DFL_PCI
+ tristate "FPGA DFL PCIe Device Driver"
+ depends on PCI && FPGA_DFL
+ help
+ Select this option to enable PCIe driver for PCIe-based
+ Field-Programmable Gate Array (FPGA) solutions which implement
+ the Device Feature List (DFL). This driver provides interfaces
+ for userspace applications to configure, enumerate, open and access
+ FPGA accelerators on FPGA DFL devices, and it enables system-level
+ management functions such as FPGA partial reconfiguration, power
+ management and virtualization through the DFL framework and DFL
+ feature device drivers.
+
+ To compile this as a module, choose M here.
+
+config FPGA_MGR_ZYNQMP_FPGA
+ tristate "Xilinx ZynqMP FPGA"
+ depends on ZYNQMP_FIRMWARE || (!ZYNQMP_FIRMWARE && COMPILE_TEST)
+ help
+ FPGA manager driver support for Xilinx ZynqMP FPGAs.
+ This driver uses the processor configuration access port (PCAP)
+ to configure the programmable logic (PL) through the PS
+ on the ZynqMP SoC.
+
+config FPGA_MGR_VERSAL_FPGA
+ tristate "Xilinx Versal FPGA"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+ Select this option to enable FPGA manager driver support for
+ Xilinx Versal SoC. This driver uses the firmware interface to
+ configure the programmable logic (PL).
+
+ To compile this as a module, choose M here.
+
+config FPGA_M10_BMC_SEC_UPDATE
+ tristate "Intel MAX10 BMC Secure Update driver"
+ depends on MFD_INTEL_M10_BMC_CORE
+ select FW_LOADER
+ select FW_UPLOAD
+ help
+ Secure update support for the Intel MAX10 board management
+ controller.
+
+ This is a subdriver of the Intel MAX10 board management controller
+ (BMC) and provides support for secure updates for the BMC image,
+ the FPGA image, the Root Entry Hashes, etc.
+
+config FPGA_MGR_MICROCHIP_SPI
+ tristate "Microchip Polarfire SPI FPGA manager"
+ depends on SPI
+ help
+ FPGA manager driver support for Microchip Polarfire FPGAs
+ programming over slave SPI interface with .dat formatted
+ bitstream image.
+
+config FPGA_MGR_LATTICE_SYSCONFIG
+ tristate
+
+config FPGA_MGR_LATTICE_SYSCONFIG_SPI
+ tristate "Lattice sysCONFIG SPI FPGA manager"
+ depends on SPI
+ select FPGA_MGR_LATTICE_SYSCONFIG
+ help
+ FPGA manager driver support for Lattice FPGAs programming over slave
+ SPI sysCONFIG interface.
+
+source "drivers/fpga/tests/Kconfig"
+
+endif # FPGA
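The framework configured above exposes an in-kernel API; for orientation, the
sketch below shows how a consumer driver might load a full bitstream through
an FPGA manager registered by one of the drivers in this series. It is
illustrative only, not part of the commit: the manager device-tree node and
the "image.rbf" firmware name are assumptions, and in practice the FPGA
region core usually drives this sequence.

#include <linux/device.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/of.h>

static int example_load_bitstream(struct device *dev,
                                  struct device_node *mgr_node)
{
        struct fpga_image_info *info;
        struct fpga_manager *mgr;
        int ret;

        mgr = of_fpga_mgr_get(mgr_node);        /* takes a reference */
        if (IS_ERR(mgr))
                return PTR_ERR(mgr);

        info = fpga_image_info_alloc(dev);
        if (!info) {
                ret = -ENOMEM;
                goto put_mgr;
        }

        /* Ask the core to fetch the image from /lib/firmware; it then
         * calls the manager's write_init/write/write_complete ops. */
        info->firmware_name = devm_kstrdup(dev, "image.rbf", GFP_KERNEL);
        if (!info->firmware_name) {
                ret = -ENOMEM;
                goto free_info;
        }

        ret = fpga_mgr_lock(mgr);               /* exclusive use */
        if (ret)
                goto free_info;
        ret = fpga_mgr_load(mgr, info);
        fpga_mgr_unlock(mgr);

free_info:
        fpga_image_info_free(info);
put_mgr:
        fpga_mgr_put(mgr);
        return ret;
}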
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
new file mode 100644
index 0000000000..352a261262
--- /dev/null
+++ b/drivers/fpga/Makefile
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the fpga framework and fpga manager drivers.
+#
+
+# Core FPGA Manager Framework
+obj-$(CONFIG_FPGA) += fpga-mgr.o
+
+# FPGA Manager Drivers
+obj-$(CONFIG_FPGA_MGR_ALTERA_CVP) += altera-cvp.o
+obj-$(CONFIG_FPGA_MGR_ALTERA_PS_SPI) += altera-ps-spi.o
+obj-$(CONFIG_FPGA_MGR_ICE40_SPI) += ice40-spi.o
+obj-$(CONFIG_FPGA_MGR_MACHXO2_SPI) += machxo2-spi.o
+obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
+obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
+obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o
+obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
+obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
+obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
+obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o
+obj-$(CONFIG_FPGA_MGR_MICROCHIP_SPI) += microchip-spi.o
+obj-$(CONFIG_FPGA_MGR_LATTICE_SYSCONFIG) += lattice-sysconfig.o
+obj-$(CONFIG_FPGA_MGR_LATTICE_SYSCONFIG_SPI) += lattice-sysconfig-spi.o
+obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
+obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
+
+# FPGA Secure Update Drivers
+obj-$(CONFIG_FPGA_M10_BMC_SEC_UPDATE) += intel-m10-bmc-sec-update.o
+
+# FPGA Bridge Drivers
+obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o
+obj-$(CONFIG_SOCFPGA_FPGA_BRIDGE) += altera-hps2fpga.o altera-fpga2sdram.o
+obj-$(CONFIG_ALTERA_FREEZE_BRIDGE) += altera-freeze-bridge.o
+obj-$(CONFIG_XILINX_PR_DECOUPLER) += xilinx-pr-decoupler.o
+
+# High Level Interfaces
+obj-$(CONFIG_FPGA_REGION) += fpga-region.o
+obj-$(CONFIG_OF_FPGA_REGION) += of-fpga-region.o
+
+# FPGA Device Feature List Support
+obj-$(CONFIG_FPGA_DFL) += dfl.o
+obj-$(CONFIG_FPGA_DFL_FME) += dfl-fme.o
+obj-$(CONFIG_FPGA_DFL_FME_MGR) += dfl-fme-mgr.o
+obj-$(CONFIG_FPGA_DFL_FME_BRIDGE) += dfl-fme-br.o
+obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
+obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
+
+dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
+dfl-fme-objs += dfl-fme-perf.o
+dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
+dfl-afu-objs += dfl-afu-error.o
+
+obj-$(CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000) += dfl-n3000-nios.o
+
+# Drivers for FPGAs which implement DFL
+obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
+
+# KUnit tests
+obj-$(CONFIG_FPGA_KUNIT_TESTS) += tests/
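The DFL modules built above enumerate their sub-devices by walking a chain of
Device Feature Headers in MMIO space, as the FPGA_DFL Kconfig help describes.
A rough sketch of that walk follows; the bit positions are assumed to match
the DFH layout used by dfl.h in this series, and "ioaddr" is a hypothetical
mapped base of the first header.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/printk.h>
#include <linux/types.h>

#define DFH_ID            GENMASK_ULL(11, 0)   /* feature id */
#define DFH_NEXT_HDR_OFST GENMASK_ULL(39, 16)  /* offset to next header */
#define DFH_EOL           BIT_ULL(40)          /* end of list */
#define DFH_TYPE          GENMASK_ULL(63, 60)  /* FIU/private/AFU */

static void example_walk_dfl(void __iomem *ioaddr)
{
        u64 dfh;

        for (;;) {
                dfh = readq(ioaddr);
                pr_info("feature type %llu id %llu\n",
                        (unsigned long long)FIELD_GET(DFH_TYPE, dfh),
                        (unsigned long long)FIELD_GET(DFH_ID, dfh));
                /* stop at the end-of-list flag or a terminating offset */
                if ((dfh & DFH_EOL) || !FIELD_GET(DFH_NEXT_HDR_OFST, dfh))
                        break;
                ioaddr += FIELD_GET(DFH_NEXT_HDR_OFST, dfh);
        }
}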
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
new file mode 100644
index 0000000000..4ffb9da537
--- /dev/null
+++ b/drivers/fpga/altera-cvp.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * FPGA Manager Driver for Altera Arria/Cyclone/Stratix CvP
+ *
+ * Copyright (C) 2017 DENX Software Engineering
+ *
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * Manage Altera FPGA firmware using PCIe CvP.
+ * Firmware must be in binary "rbf" format.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+
+#define CVP_BAR 0 /* BAR used for data transfer in memory mode */
+#define CVP_DUMMY_WR 244 /* dummy writes to clear CvP state machine */
+#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */
+
+/* Vendor Specific Extended Capability Registers */
+#define VSE_PCIE_EXT_CAP_ID 0x0
+#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */
+
+#define VSE_CVP_STATUS 0x1c /* 32bit */
+#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */
+#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */
+#define VSE_CVP_STATUS_CVP_EN BIT(20) /* ctrl block is enabling CVP */
+#define VSE_CVP_STATUS_USERMODE BIT(21) /* USERMODE */
+#define VSE_CVP_STATUS_CFG_DONE BIT(23) /* CVP_CONFIG_DONE */
+#define VSE_CVP_STATUS_PLD_CLK_IN_USE BIT(24) /* PLD_CLK_IN_USE */
+
+#define VSE_CVP_MODE_CTRL 0x20 /* 32bit */
+#define VSE_CVP_MODE_CTRL_CVP_MODE BIT(0) /* CVP (1) or normal mode (0) */
+#define VSE_CVP_MODE_CTRL_HIP_CLK_SEL BIT(1) /* PMA (1) or fabric clock (0) */
+#define VSE_CVP_MODE_CTRL_NUMCLKS_OFF 8 /* NUMCLKS bits offset */
+#define VSE_CVP_MODE_CTRL_NUMCLKS_MASK GENMASK(15, 8)
+
+#define VSE_CVP_DATA 0x28 /* 32bit */
+#define VSE_CVP_PROG_CTRL 0x2c /* 32bit */
+#define VSE_CVP_PROG_CTRL_CONFIG BIT(0)
+#define VSE_CVP_PROG_CTRL_START_XFER BIT(1)
+#define VSE_CVP_PROG_CTRL_MASK GENMASK(1, 0)
+
+#define VSE_UNCOR_ERR_STATUS 0x34 /* 32bit */
+#define VSE_UNCOR_ERR_CVP_CFG_ERR BIT(5) /* CVP_CONFIG_ERROR_LATCHED */
+
+#define V1_VSEC_OFFSET 0x200 /* Vendor Specific Offset V1 */
+/* V2 Defines */
+#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
+
+#define V2_CREDIT_TIMEOUT_US 20000
+#define V2_CHECK_CREDIT_US 10
+#define V2_POLL_TIMEOUT_US 1000000
+#define V2_USER_TIMEOUT_US 500000
+
+#define V1_POLL_TIMEOUT_US 10
+
+#define DRV_NAME "altera-cvp"
+#define ALTERA_CVP_MGR_NAME "Altera CvP FPGA Manager"
+
+/* Write block sizes */
+#define ALTERA_CVP_V1_SIZE 4
+#define ALTERA_CVP_V2_SIZE 4096
+
+/* Optional CvP config error status check for debugging */
+static bool altera_cvp_chkcfg;
+
+struct cvp_priv;
+
+struct altera_cvp_conf {
+ struct fpga_manager *mgr;
+ struct pci_dev *pci_dev;
+ void __iomem *map;
+ void (*write_data)(struct altera_cvp_conf *conf,
+ u32 data);
+ char mgr_name[64];
+ u8 numclks;
+ u32 sent_packets;
+ u32 vsec_offset;
+ const struct cvp_priv *priv;
+};
+
+struct cvp_priv {
+ void (*switch_clk)(struct altera_cvp_conf *conf);
+ int (*clear_state)(struct altera_cvp_conf *conf);
+ int (*wait_credit)(struct fpga_manager *mgr, u32 blocks);
+ size_t block_size;
+ int poll_time_us;
+ int user_time_us;
+};
+
+static int altera_read_config_byte(struct altera_cvp_conf *conf,
+ int where, u8 *val)
+{
+ return pci_read_config_byte(conf->pci_dev, conf->vsec_offset + where,
+ val);
+}
+
+static int altera_read_config_dword(struct altera_cvp_conf *conf,
+ int where, u32 *val)
+{
+ return pci_read_config_dword(conf->pci_dev, conf->vsec_offset + where,
+ val);
+}
+
+static int altera_write_config_dword(struct altera_cvp_conf *conf,
+ int where, u32 val)
+{
+ return pci_write_config_dword(conf->pci_dev, conf->vsec_offset + where,
+ val);
+}
+
+static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 status;
+
+ altera_read_config_dword(conf, VSE_CVP_STATUS, &status);
+
+ if (status & VSE_CVP_STATUS_CFG_DONE)
+ return FPGA_MGR_STATE_OPERATING;
+
+ if (status & VSE_CVP_STATUS_CVP_EN)
+ return FPGA_MGR_STATE_POWER_UP;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val)
+{
+ writel(val, conf->map);
+}
+
+static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
+{
+ pci_write_config_dword(conf->pci_dev, conf->vsec_offset + VSE_CVP_DATA,
+ val);
+}
+
+/* switches between CvP clock and internal clock */
+static void altera_cvp_dummy_write(struct altera_cvp_conf *conf)
+{
+ unsigned int i;
+ u32 val;
+
+ /* set 1 CVP clock cycle for every CVP Data Register Write */
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+ val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
+
+ for (i = 0; i < CVP_DUMMY_WR; i++)
+ conf->write_data(conf, 0); /* dummy data, could be any value */
+}
+
+static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
+ u32 status_val, int timeout_us)
+{
+ unsigned int retries;
+ u32 val;
+
+ retries = timeout_us / 10;
+ if (timeout_us % 10)
+ retries++;
+
+ do {
+ altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
+ if ((val & status_mask) == status_val)
+ return 0;
+
+ /* use small usleep value to re-check and break early */
+ usleep_range(10, 11);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 val;
+ int ret;
+
+ /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
+ ret = altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
+ if (ret || (val & VSE_CVP_STATUS_CFG_ERR)) {
+ dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
+ bytes);
+ return -EPROTO;
+ }
+ return 0;
+}
+
+/*
+ * CvP Version2 Functions
+ * Recent Intel FPGAs use a credit mechanism to throttle incoming
+ * bitstreams and a different method of clearing the state.
+ */
+
+static int altera_cvp_v2_clear_state(struct altera_cvp_conf *conf)
+{
+ u32 val;
+ int ret;
+
+ /* Clear the START_XFER and CVP_CONFIG bits */
+ ret = altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "Error reading CVP Program Control Register\n");
+ return ret;
+ }
+
+ val &= ~VSE_CVP_PROG_CTRL_MASK;
+ ret = altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "Error writing CVP Program Control Register\n");
+ return ret;
+ }
+
+ return altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
+ conf->priv->poll_time_us);
+}
+
+static int altera_cvp_v2_wait_for_credit(struct fpga_manager *mgr,
+ u32 blocks)
+{
+ u32 timeout = V2_CREDIT_TIMEOUT_US / V2_CHECK_CREDIT_US;
+ struct altera_cvp_conf *conf = mgr->priv;
+ int ret;
+ u8 val;
+
+ do {
+ ret = altera_read_config_byte(conf, VSE_CVP_TX_CREDITS, &val);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "Error reading CVP Credit Register\n");
+ return ret;
+ }
+
+ /* Return if there is space in FIFO */
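+ /*
+ * Both the credit register and sent_packets wrap at 256, so the
+ * difference in u8 arithmetic is the number of blocks the device
+ * can still accept. Example: credits val = 0x05 after 0x103 blocks
+ * sent gives 0x05 - (u8)0x03 = 2 free slots; a difference of zero
+ * means the FIFO is full and we keep polling.
+ */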
+ if (val - (u8)conf->sent_packets)
+ return 0;
+
+ ret = altera_cvp_chk_error(mgr, blocks * ALTERA_CVP_V2_SIZE);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "CE Bit error credit reg[0x%x]:sent[0x%x]\n",
+ val, conf->sent_packets);
+ return -EAGAIN;
+ }
+
+ /* Limit the check credit byte traffic */
+ usleep_range(V2_CHECK_CREDIT_US, V2_CHECK_CREDIT_US + 1);
+ } while (timeout--);
+
+ dev_err(&conf->pci_dev->dev, "Timeout waiting for credit\n");
+ return -ETIMEDOUT;
+}
+
+static int altera_cvp_send_block(struct altera_cvp_conf *conf,
+ const u32 *data, size_t len)
+{
+ u32 mask, words = len / sizeof(u32);
+ int i, remainder;
+
+ for (i = 0; i < words; i++)
+ conf->write_data(conf, *data++);
+
+ /*
+ * Write up to 3 trailing bytes, if any: e.g. remainder = 3 gives
+ * mask = BIT(24) - 1 = 0x00ffffff, keeping only the valid bytes.
+ */
+ remainder = len % sizeof(u32);
+ if (remainder) {
+ mask = BIT(remainder * 8) - 1;
+ if (mask)
+ conf->write_data(conf, *data & mask);
+ }
+
+ return 0;
+}
+
+static int altera_cvp_teardown(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ int ret;
+ u32 val;
+
+ /* STEP 12 - reset START_XFER bit */
+ altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
+ val &= ~VSE_CVP_PROG_CTRL_START_XFER;
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 13 - reset CVP_CONFIG bit */
+ val &= ~VSE_CVP_PROG_CTRL_CONFIG;
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
+
+ /*
+ * STEP 14
+ * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy
+ * writes to the HIP
+ */
+ if (conf->priv->switch_clk)
+ conf->priv->switch_clk(conf);
+
+ /* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */
+ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
+ conf->priv->poll_time_us);
+ if (ret)
+ dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n");
+
+ return ret;
+}
+
+static int altera_cvp_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 iflags, val;
+ int ret;
+
+ iflags = info ? info->flags : 0;
+
+ if (iflags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Determine allowed clock to data ratio */
+ if (iflags & FPGA_MGR_COMPRESSED_BITSTREAM)
+ conf->numclks = 8; /* ratio for all compressed images */
+ else if (iflags & FPGA_MGR_ENCRYPTED_BITSTREAM)
+ conf->numclks = 4; /* for uncompressed and encrypted images */
+ else
+ conf->numclks = 1; /* for uncompressed and unencrypted images */
+
+ /* STEP 1 - read CVP status and check CVP_EN flag */
+ altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
+ if (!(val & VSE_CVP_STATUS_CVP_EN)) {
+ dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val);
+ return -ENODEV;
+ }
+
+ if (val & VSE_CVP_STATUS_CFG_RDY) {
+ dev_warn(&mgr->dev, "CvP already started, tear down first\n");
+ ret = altera_cvp_teardown(mgr, info);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * STEP 2
+ * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned)
+ */
+ /* switch from fabric to PMA clock */
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
+ val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
+
+ /* set CVP mode */
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
+ val |= VSE_CVP_MODE_CTRL_CVP_MODE;
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
+
+ /*
+ * STEP 3
+ * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
+ */
+ if (conf->priv->switch_clk)
+ conf->priv->switch_clk(conf);
+
+ if (conf->priv->clear_state) {
+ ret = conf->priv->clear_state(conf);
+ if (ret) {
+ dev_err(&mgr->dev, "Problem clearing out state\n");
+ return ret;
+ }
+ }
+
+ conf->sent_packets = 0;
+
+ /* STEP 4 - set CVP_CONFIG bit */
+ altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
+ /* request control block to begin transfer using CVP */
+ val |= VSE_CVP_PROG_CTRL_CONFIG;
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 5 - poll CVP_CONFIG READY for 1 with timeout */
+ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY,
+ VSE_CVP_STATUS_CFG_RDY,
+ conf->priv->poll_time_us);
+ if (ret) {
+ dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
+ return ret;
+ }
+
+ /*
+ * STEP 6
+ * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
+ */
+ if (conf->priv->switch_clk)
+ conf->priv->switch_clk(conf);
+
+ if (altera_cvp_chkcfg) {
+ ret = altera_cvp_chk_error(mgr, 0);
+ if (ret) {
+ dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
+ return ret;
+ }
+ }
+
+ /* STEP 7 - set START_XFER */
+ altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
+ val |= VSE_CVP_PROG_CTRL_START_XFER;
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */
+ if (conf->priv->switch_clk) {
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+ val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
+ }
+ return 0;
+}
+
+static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ size_t done, remaining, len;
+ const u32 *data;
+ int status = 0;
+
+ /* STEP 9 - write 32-bit data from RBF file to CVP data register */
+ data = (u32 *)buf;
+ remaining = count;
+ done = 0;
+
+ while (remaining) {
+ /* Use credit throttling if available */
+ if (conf->priv->wait_credit) {
+ status = conf->priv->wait_credit(mgr, done);
+ if (status) {
+ dev_err(&conf->pci_dev->dev,
+ "Wait Credit ERR: 0x%x\n", status);
+ return status;
+ }
+ }
+
+ len = min(conf->priv->block_size, remaining);
+ altera_cvp_send_block(conf, data, len);
+ data += len / sizeof(u32);
+ done += len;
+ remaining -= len;
+ conf->sent_packets++;
+
+ /*
+ * STEP 10 (optional) and STEP 11
+ * - check error flag
+ * - loop until data transfer completed
+ * Config images can be huge (more than 40 MiB), so
+ * only check after a new 4k data block has been written.
+ * This reduces the number of checks and speeds up the
+ * configuration process.
+ */
+ if (altera_cvp_chkcfg && !(done % SZ_4K)) {
+ status = altera_cvp_chk_error(mgr, done);
+ if (status < 0)
+ return status;
+ }
+ }
+
+ if (altera_cvp_chkcfg)
+ status = altera_cvp_chk_error(mgr, count);
+
+ return status;
+}
+
+static int altera_cvp_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 mask, val;
+ int ret;
+
+ ret = altera_cvp_teardown(mgr, info);
+ if (ret)
+ return ret;
+
+ /* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */
+ altera_read_config_dword(conf, VSE_UNCOR_ERR_STATUS, &val);
+ if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) {
+ dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n");
+ return -EPROTO;
+ }
+
+ /* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
+ val &= ~VSE_CVP_MODE_CTRL_CVP_MODE;
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
+
+ /* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */
+ mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE;
+ ret = altera_cvp_wait_status(conf, mask, mask,
+ conf->priv->user_time_us);
+ if (ret)
+ dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n");
+
+ return ret;
+}
+
+static const struct fpga_manager_ops altera_cvp_ops = {
+ .state = altera_cvp_state,
+ .write_init = altera_cvp_write_init,
+ .write = altera_cvp_write,
+ .write_complete = altera_cvp_write_complete,
+};
+
+static const struct cvp_priv cvp_priv_v1 = {
+ .switch_clk = altera_cvp_dummy_write,
+ .block_size = ALTERA_CVP_V1_SIZE,
+ .poll_time_us = V1_POLL_TIMEOUT_US,
+ .user_time_us = TIMEOUT_US,
+};
+
+static const struct cvp_priv cvp_priv_v2 = {
+ .clear_state = altera_cvp_v2_clear_state,
+ .wait_credit = altera_cvp_v2_wait_for_credit,
+ .block_size = ALTERA_CVP_V2_SIZE,
+ .poll_time_us = V2_POLL_TIMEOUT_US,
+ .user_time_us = V2_USER_TIMEOUT_US,
+};
+
+static ssize_t chkcfg_show(struct device_driver *drv, char *buf)
+{
+ return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
+}
+
+static ssize_t chkcfg_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int ret;
+
+ ret = kstrtobool(buf, &altera_cvp_chkcfg);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DRIVER_ATTR_RW(chkcfg);
+
+static int altera_cvp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id);
+static void altera_cvp_remove(struct pci_dev *pdev);
+
+static struct pci_device_id altera_cvp_id_tbl[] = {
+ { PCI_VDEVICE(ALTERA, PCI_ANY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, altera_cvp_id_tbl);
+
+static struct pci_driver altera_cvp_driver = {
+ .name = DRV_NAME,
+ .id_table = altera_cvp_id_tbl,
+ .probe = altera_cvp_probe,
+ .remove = altera_cvp_remove,
+};
+
+static int altera_cvp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
+{
+ struct altera_cvp_conf *conf;
+ struct fpga_manager *mgr;
+ int ret, offset;
+ u16 cmd, val;
+ u32 regval;
+
+ /* Discover the Vendor Specific Offset for this device */
+ offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR);
+ if (!offset) {
+ dev_err(&pdev->dev, "No Vendor Specific Offset.\n");
+ return -ENODEV;
+ }
+
+ /*
+ * First check if this is the expected FPGA device. PCI config
+ * space access works without enabling the PCI device, memory
+ * space access is enabled further down.
+ */
+ pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val);
+ if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
+ dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
+ return -ENODEV;
+ }
+
+ pci_read_config_dword(pdev, offset + VSE_CVP_STATUS, &regval);
+ if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
+ dev_err(&pdev->dev,
+ "CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
+ regval);
+ return -ENODEV;
+ }
+
+ conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ conf->vsec_offset = offset;
+
+ /*
+ * Enable memory BAR access. We cannot use pci_enable_device() here
+ * because it would make the driver unusable with FPGA devices that
+ * have additional big IOMEM resources (e.g. 4 GiB BARs) on 32-bit
+ * platforms. Such BARs will not have an assigned address range, and
+ * pci_enable_device() will fail, complaining about an unclaimed BAR,
+ * even if the BAR in question is not needed for FPGA configuration
+ * at all. Thus, enable the device via a PCI config space command.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY)) {
+ cmd |= PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ ret = pci_request_region(pdev, CVP_BAR, "CVP");
+ if (ret) {
+ dev_err(&pdev->dev, "Requesting CVP BAR region failed\n");
+ goto err_disable;
+ }
+
+ conf->pci_dev = pdev;
+ conf->write_data = altera_cvp_write_data_iomem;
+
+ if (conf->vsec_offset == V1_VSEC_OFFSET)
+ conf->priv = &cvp_priv_v1;
+ else
+ conf->priv = &cvp_priv_v2;
+
+ conf->map = pci_iomap(pdev, CVP_BAR, 0);
+ if (!conf->map) {
+ dev_warn(&pdev->dev, "Mapping CVP BAR failed\n");
+ conf->write_data = altera_cvp_write_data_config;
+ }
+
+ snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
+ ALTERA_CVP_MGR_NAME, pci_name(pdev));
+
+ mgr = fpga_mgr_register(&pdev->dev, conf->mgr_name,
+ &altera_cvp_ops, conf);
+ if (IS_ERR(mgr)) {
+ ret = PTR_ERR(mgr);
+ goto err_unmap;
+ }
+
+ pci_set_drvdata(pdev, mgr);
+
+ return 0;
+
+err_unmap:
+ if (conf->map)
+ pci_iounmap(pdev, conf->map);
+ pci_release_region(pdev, CVP_BAR);
+err_disable:
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ return ret;
+}
+
+static void altera_cvp_remove(struct pci_dev *pdev)
+{
+ struct fpga_manager *mgr = pci_get_drvdata(pdev);
+ struct altera_cvp_conf *conf = mgr->priv;
+ u16 cmd;
+
+ fpga_mgr_unregister(mgr);
+ if (conf->map)
+ pci_iounmap(pdev, conf->map);
+ pci_release_region(pdev, CVP_BAR);
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+}
+
+static int __init altera_cvp_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&altera_cvp_driver);
+ if (ret)
+ return ret;
+
+ ret = driver_create_file(&altera_cvp_driver.driver,
+ &driver_attr_chkcfg);
+ if (ret)
+ pr_warn("Can't create sysfs chkcfg file\n");
+
+ return 0;
+}
+
+static void __exit altera_cvp_exit(void)
+{
+ driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
+ pci_unregister_driver(&altera_cvp_driver);
+}
+
+module_init(altera_cvp_init);
+module_exit(altera_cvp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_DESCRIPTION("Module to load Altera FPGA over CvP");
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
new file mode 100644
index 0000000000..1fa2ccc321
--- /dev/null
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA to SDRAM Bridge Driver for Altera SoCFPGA Devices
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ */
+
+/*
+ * This driver manages a bridge between an FPGA and the SDRAM used by the ARM
+ * host processor system (HPS).
+ *
+ * The bridge contains 4 read ports, 4 write ports, and 6 command ports.
+ * Reconfiguring these ports requires that no SDRAM transactions occur during
+ * reconfiguration. The code reconfiguring the ports cannot itself run from
+ * SDRAM, nor can the FPGA access the SDRAM while reconfiguration is under
+ * way. This driver does not support reconfiguring the ports. The ports are
+ * configured by code running from on-chip RAM before Linux starts, and the
+ * configuration is passed in a handoff register in the system manager.
+ *
+ * This driver supports enabling and disabling of the configured ports, which
+ * allows for safe reprogramming of the FPGA, assuming that the new FPGA image
+ * uses the same port configuration. Bridges must be disabled before
+ * reprogramming the FPGA and re-enabled after the FPGA has been programmed.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define ALT_SDR_CTL_FPGAPORTRST_OFST 0x80
+#define ALT_SDR_CTL_FPGAPORTRST_PORTRSTN_MSK 0x00003fff
+#define ALT_SDR_CTL_FPGAPORTRST_RD_SHIFT 0
+#define ALT_SDR_CTL_FPGAPORTRST_WR_SHIFT 4
+#define ALT_SDR_CTL_FPGAPORTRST_CTRL_SHIFT 8
+
+/*
+ * From the Cyclone V HPS Memory Map document:
+ * These registers are used to store handoff information between the
+ * preloader and the OS. These 8 registers can be used to store any
+ * information. The contents of these registers have no impact on
+ * the state of the HPS hardware.
+ */
+#define SYSMGR_ISWGRP_HANDOFF3 (0x8C)
+
+#define F2S_BRIDGE_NAME "fpga2sdram"
+
+struct alt_fpga2sdram_data {
+ struct device *dev;
+ struct regmap *sdrctl;
+ int mask;
+};
+
+static int alt_fpga2sdram_enable_show(struct fpga_bridge *bridge)
+{
+ struct alt_fpga2sdram_data *priv = bridge->priv;
+ int value;
+
+ regmap_read(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST, &value);
+
+ return (value & priv->mask) == priv->mask;
+}
+
+static inline int _alt_fpga2sdram_enable_set(struct alt_fpga2sdram_data *priv,
+ bool enable)
+{
+ return regmap_update_bits(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST,
+ priv->mask, enable ? priv->mask : 0);
+}
+
+static int alt_fpga2sdram_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ return _alt_fpga2sdram_enable_set(bridge->priv, enable);
+}
+
+struct prop_map {
+ char *prop_name;
+ u32 *prop_value;
+ u32 prop_max;
+};
+
+static const struct fpga_bridge_ops altera_fpga2sdram_br_ops = {
+ .enable_set = alt_fpga2sdram_enable_set,
+ .enable_show = alt_fpga2sdram_enable_show,
+};
+
+static const struct of_device_id altera_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-fpga2sdram-bridge" },
+ {},
+};
+
+static int alt_fpga_bridge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct alt_fpga2sdram_data *priv;
+ struct fpga_bridge *br;
+ u32 enable;
+ struct regmap *sysmgr;
+ int ret = 0;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->sdrctl = syscon_regmap_lookup_by_compatible("altr,sdr-ctl");
+ if (IS_ERR(priv->sdrctl)) {
+ dev_err(dev, "regmap for altr,sdr-ctl lookup failed.\n");
+ return PTR_ERR(priv->sdrctl);
+ }
+
+ sysmgr = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
+ if (IS_ERR(sysmgr)) {
+ dev_err(dev, "regmap for altr,sys-mgr lookup failed.\n");
+ return PTR_ERR(sysmgr);
+ }
+
+ /* Get f2s bridge configuration saved in handoff register */
+ regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask);
+
+ br = fpga_bridge_register(dev, F2S_BRIDGE_NAME,
+ &altera_fpga2sdram_br_ops, priv);
+ if (IS_ERR(br))
+ return PTR_ERR(br);
+
+ platform_set_drvdata(pdev, br);
+
+ dev_info(dev, "driver initialized with handoff %08x\n", priv->mask);
+
+ if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+ if (enable > 1) {
+ dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+ } else {
+ dev_info(dev, "%s bridge\n",
+ (enable ? "enabling" : "disabling"));
+ ret = _alt_fpga2sdram_enable_set(priv, enable);
+ if (ret) {
+ fpga_bridge_unregister(br);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int alt_fpga_bridge_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *br = platform_get_drvdata(pdev);
+
+ fpga_bridge_unregister(br);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
+
+static struct platform_driver altera_fpga_driver = {
+ .probe = alt_fpga_bridge_probe,
+ .remove = alt_fpga_bridge_remove,
+ .driver = {
+ .name = "altera_fpga2sdram_bridge",
+ .of_match_table = of_match_ptr(altera_fpga_of_match),
+ },
+};
+
+module_platform_driver(altera_fpga_driver);
+
+MODULE_DESCRIPTION("Altera SoCFPGA FPGA to SDRAM Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
new file mode 100644
index 0000000000..0c3fb82269
--- /dev/null
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Freeze Bridge Controller
+ *
+ * Copyright (C) 2016 Altera Corporation. All rights reserved.
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#define FREEZE_CSR_STATUS_OFFSET 0
+#define FREEZE_CSR_CTRL_OFFSET 4
+#define FREEZE_CSR_ILLEGAL_REQ_OFFSET 8
+#define FREEZE_CSR_REG_VERSION 12
+
+#define FREEZE_CSR_SUPPORTED_VERSION 2
+#define FREEZE_CSR_OFFICIAL_VERSION 0xad000003
+
+#define FREEZE_CSR_STATUS_FREEZE_REQ_DONE BIT(0)
+#define FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE BIT(1)
+
+#define FREEZE_CSR_CTRL_FREEZE_REQ BIT(0)
+#define FREEZE_CSR_CTRL_RESET_REQ BIT(1)
+#define FREEZE_CSR_CTRL_UNFREEZE_REQ BIT(2)
+
+#define FREEZE_BRIDGE_NAME "freeze"
+
+struct altera_freeze_br_data {
+ struct device *dev;
+ void __iomem *base_addr;
+ bool enable;
+};
+
+/*
+ * Poll status until status bit is set or we have a timeout.
+ */
+static int altera_freeze_br_req_ack(struct altera_freeze_br_data *priv,
+ u32 timeout, u32 req_ack)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_illegal_req_addr = priv->base_addr +
+ FREEZE_CSR_ILLEGAL_REQ_OFFSET;
+ u32 status, illegal, ctrl;
+ int ret = -ETIMEDOUT;
+
+ do {
+ illegal = readl(csr_illegal_req_addr);
+ if (illegal) {
+ dev_err(dev, "illegal request detected 0x%x", illegal);
+
+ writel(1, csr_illegal_req_addr);
+
+ illegal = readl(csr_illegal_req_addr);
+ if (illegal)
+ dev_err(dev, "illegal request not cleared 0x%x",
+ illegal);
+
+ ret = -EINVAL;
+ break;
+ }
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+ dev_dbg(dev, "%s %x %x\n", __func__, status, req_ack);
+ status &= req_ack;
+ if (status) {
+ ctrl = readl(priv->base_addr + FREEZE_CSR_CTRL_OFFSET);
+ dev_dbg(dev, "%s request %x acknowledged %x %x\n",
+ __func__, req_ack, status, ctrl);
+ ret = 0;
+ break;
+ }
+
+ udelay(1);
+ } while (timeout--);
+
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "%s timeout waiting for 0x%x\n",
+ __func__, req_ack);
+
+ return ret;
+}
+
+static int altera_freeze_br_do_freeze(struct altera_freeze_br_data *priv,
+ u32 timeout)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_ctrl_addr = priv->base_addr +
+ FREEZE_CSR_CTRL_OFFSET;
+ u32 status;
+ int ret;
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ if (status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE) {
+ dev_dbg(dev, "%s bridge already disabled %d\n",
+ __func__, status);
+ return 0;
+ } else if (!(status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)) {
+ dev_err(dev, "%s bridge not enabled %d\n", __func__, status);
+ return -EINVAL;
+ }
+
+ writel(FREEZE_CSR_CTRL_FREEZE_REQ, csr_ctrl_addr);
+
+ ret = altera_freeze_br_req_ack(priv, timeout,
+ FREEZE_CSR_STATUS_FREEZE_REQ_DONE);
+
+ if (ret)
+ writel(0, csr_ctrl_addr);
+ else
+ writel(FREEZE_CSR_CTRL_RESET_REQ, csr_ctrl_addr);
+
+ return ret;
+}
+
+static int altera_freeze_br_do_unfreeze(struct altera_freeze_br_data *priv,
+ u32 timeout)
+{
+ struct device *dev = priv->dev;
+ void __iomem *csr_ctrl_addr = priv->base_addr +
+ FREEZE_CSR_CTRL_OFFSET;
+ u32 status;
+ int ret;
+
+ writel(0, csr_ctrl_addr);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE) {
+ dev_dbg(dev, "%s bridge already enabled %d\n",
+ __func__, status);
+ return 0;
+ } else if (!(status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE)) {
+ dev_err(dev, "%s bridge not frozen %d\n", __func__, status);
+ return -EINVAL;
+ }
+
+ writel(FREEZE_CSR_CTRL_UNFREEZE_REQ, csr_ctrl_addr);
+
+ ret = altera_freeze_br_req_ack(priv, timeout,
+ FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE);
+
+ status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+ dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+ writel(0, csr_ctrl_addr);
+
+ return ret;
+}
+
+/*
+ * enable = 1 : allow traffic through the bridge
+ * enable = 0 : disable traffic through the bridge
+ */
+static int altera_freeze_br_enable_set(struct fpga_bridge *bridge,
+ bool enable)
+{
+ struct altera_freeze_br_data *priv = bridge->priv;
+ struct fpga_image_info *info = bridge->info;
+ u32 timeout = 0;
+ int ret;
+
+ if (enable) {
+ if (info)
+ timeout = info->enable_timeout_us;
+
+ ret = altera_freeze_br_do_unfreeze(bridge->priv, timeout);
+ } else {
+ if (info)
+ timeout = info->disable_timeout_us;
+
+ ret = altera_freeze_br_do_freeze(bridge->priv, timeout);
+ }
+
+ if (!ret)
+ priv->enable = enable;
+
+ return ret;
+}
+
+static int altera_freeze_br_enable_show(struct fpga_bridge *bridge)
+{
+ struct altera_freeze_br_data *priv = bridge->priv;
+
+ return priv->enable;
+}
+
+static const struct fpga_bridge_ops altera_freeze_br_br_ops = {
+ .enable_set = altera_freeze_br_enable_set,
+ .enable_show = altera_freeze_br_enable_show,
+};
+
+static const struct of_device_id altera_freeze_br_of_match[] = {
+ { .compatible = "altr,freeze-bridge-controller", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_freeze_br_of_match);
+
+static int altera_freeze_br_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ void __iomem *base_addr;
+ struct altera_freeze_br_data *priv;
+ struct fpga_bridge *br;
+ u32 status, revision;
+
+ if (!np)
+ return -ENODEV;
+
+ base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base_addr))
+ return PTR_ERR(base_addr);
+
+ revision = readl(base_addr + FREEZE_CSR_REG_VERSION);
+ if ((revision != FREEZE_CSR_SUPPORTED_VERSION) &&
+ (revision != FREEZE_CSR_OFFICIAL_VERSION)) {
+ dev_err(dev,
+ "%s unexpected revision 0x%x != 0x%x != 0x%x\n",
+ __func__, revision, FREEZE_CSR_SUPPORTED_VERSION,
+ FREEZE_CSR_OFFICIAL_VERSION);
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ status = readl(base_addr + FREEZE_CSR_STATUS_OFFSET);
+ if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)
+ priv->enable = 1;
+
+ priv->base_addr = base_addr;
+
+ br = fpga_bridge_register(dev, FREEZE_BRIDGE_NAME,
+ &altera_freeze_br_br_ops, priv);
+ if (IS_ERR(br))
+ return PTR_ERR(br);
+
+ platform_set_drvdata(pdev, br);
+
+ return 0;
+}
+
+static int altera_freeze_br_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *br = platform_get_drvdata(pdev);
+
+ fpga_bridge_unregister(br);
+
+ return 0;
+}
+
+static struct platform_driver altera_freeze_br_driver = {
+ .probe = altera_freeze_br_probe,
+ .remove = altera_freeze_br_remove,
+ .driver = {
+ .name = "altera_freeze_br",
+ .of_match_table = altera_freeze_br_of_match,
+ },
+};
+
+module_platform_driver(altera_freeze_br_driver);
+
+MODULE_DESCRIPTION("Altera Freeze Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
new file mode 100644
index 0000000000..aa758426c2
--- /dev/null
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA to/from HPS Bridge Driver for Altera SoCFPGA Devices
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * Includes this patch from the mailing list:
+ * fpga: altera-hps2fpga: fix HPS2FPGA bridge visibility to L3 masters
+ * Signed-off-by: Anatolij Gustschin <agust@denx.de>
+ */
+
+/*
+ * This driver manages bridges on an Altera SoCFPGA between the ARM host
+ * processor system (HPS) and the embedded FPGA.
+ *
+ * This driver supports enabling and disabling of the configured ports, which
+ * allows for safe reprogramming of the FPGA, assuming that the new FPGA image
+ * uses the same port configuration. Bridges must be disabled before
+ * reprogramming the FPGA and re-enabled after the FPGA has been programmed.
+ */
+
+#include <linux/clk.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define ALT_L3_REMAP_OFST 0x0
+#define ALT_L3_REMAP_MPUZERO_MSK 0x00000001
+#define ALT_L3_REMAP_H2F_MSK 0x00000008
+#define ALT_L3_REMAP_LWH2F_MSK 0x00000010
+
+#define HPS2FPGA_BRIDGE_NAME "hps2fpga"
+#define LWHPS2FPGA_BRIDGE_NAME "lwhps2fpga"
+#define FPGA2HPS_BRIDGE_NAME "fpga2hps"
+
+struct altera_hps2fpga_data {
+ const char *name;
+ struct reset_control *bridge_reset;
+ struct regmap *l3reg;
+ unsigned int remap_mask;
+ struct clk *clk;
+};
+
+static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
+{
+ struct altera_hps2fpga_data *priv = bridge->priv;
+
+ return reset_control_status(priv->bridge_reset);
+}
+
+/* The L3 REMAP register is write only, so keep a cached value. */
+static unsigned int l3_remap_shadow;
+static DEFINE_SPINLOCK(l3_remap_lock);
+
+static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
+ bool enable)
+{
+ unsigned long flags;
+ int ret;
+
+ /* release (enable) or assert (disable) the bridge reset */
+ if (enable)
+ ret = reset_control_deassert(priv->bridge_reset);
+ else
+ ret = reset_control_assert(priv->bridge_reset);
+ if (ret)
+ return ret;
+
+ /* Allow bridge to be visible to L3 masters or not */
+ if (priv->remap_mask) {
+ spin_lock_irqsave(&l3_remap_lock, flags);
+ l3_remap_shadow |= ALT_L3_REMAP_MPUZERO_MSK;
+
+ if (enable)
+ l3_remap_shadow |= priv->remap_mask;
+ else
+ l3_remap_shadow &= ~priv->remap_mask;
+
+ ret = regmap_write(priv->l3reg, ALT_L3_REMAP_OFST,
+ l3_remap_shadow);
+ spin_unlock_irqrestore(&l3_remap_lock, flags);
+ }
+
+ return ret;
+}
+
+static int alt_hps2fpga_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ return _alt_hps2fpga_enable_set(bridge->priv, enable);
+}
+
+static const struct fpga_bridge_ops altera_hps2fpga_br_ops = {
+ .enable_set = alt_hps2fpga_enable_set,
+ .enable_show = alt_hps2fpga_enable_show,
+};
+
+static struct altera_hps2fpga_data hps2fpga_data = {
+ .name = HPS2FPGA_BRIDGE_NAME,
+ .remap_mask = ALT_L3_REMAP_H2F_MSK,
+};
+
+static struct altera_hps2fpga_data lwhps2fpga_data = {
+ .name = LWHPS2FPGA_BRIDGE_NAME,
+ .remap_mask = ALT_L3_REMAP_LWH2F_MSK,
+};
+
+static struct altera_hps2fpga_data fpga2hps_data = {
+ .name = FPGA2HPS_BRIDGE_NAME,
+};
+
+static const struct of_device_id altera_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-hps2fpga-bridge",
+ .data = &hps2fpga_data },
+ { .compatible = "altr,socfpga-lwhps2fpga-bridge",
+ .data = &lwhps2fpga_data },
+ { .compatible = "altr,socfpga-fpga2hps-bridge",
+ .data = &fpga2hps_data },
+ {},
+};
+
+static int alt_fpga_bridge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct altera_hps2fpga_data *priv;
+ const struct of_device_id *of_id;
+ struct fpga_bridge *br;
+ u32 enable;
+ int ret;
+
+ of_id = of_match_device(altera_fpga_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
+ priv = (struct altera_hps2fpga_data *)of_id->data;
+
+ priv->bridge_reset = of_reset_control_get_exclusive_by_index(dev->of_node,
+ 0);
+ if (IS_ERR(priv->bridge_reset)) {
+ dev_err(dev, "Could not get %s reset control\n", priv->name);
+ return PTR_ERR(priv->bridge_reset);
+ }
+
+ if (priv->remap_mask) {
+ priv->l3reg = syscon_regmap_lookup_by_compatible("altr,l3regs");
+ if (IS_ERR(priv->l3reg)) {
+ dev_err(dev, "regmap for altr,l3regs lookup failed\n");
+ return PTR_ERR(priv->l3reg);
+ }
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "no clock specified\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "could not enable clock\n");
+ return -EBUSY;
+ }
+
+ if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+ if (enable > 1) {
+ dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+ } else {
+ dev_info(dev, "%s bridge\n",
+ (enable ? "enabling" : "disabling"));
+
+ ret = _alt_hps2fpga_enable_set(priv, enable);
+ if (ret)
+ goto err;
+ }
+ }
+
+ br = fpga_bridge_register(dev, priv->name,
+ &altera_hps2fpga_br_ops, priv);
+ if (IS_ERR(br)) {
+ ret = PTR_ERR(br);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, br);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static int alt_fpga_bridge_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *bridge = platform_get_drvdata(pdev);
+ struct altera_hps2fpga_data *priv = bridge->priv;
+
+ fpga_bridge_unregister(bridge);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
+
+static struct platform_driver alt_fpga_bridge_driver = {
+ .probe = alt_fpga_bridge_probe,
+ .remove = alt_fpga_bridge_remove,
+ .driver = {
+ .name = "altera_hps2fpga_bridge",
+ .of_match_table = of_match_ptr(altera_fpga_of_match),
+ },
+};
+
+module_platform_driver(alt_fpga_bridge_driver);
+
+MODULE_DESCRIPTION("Altera SoCFPGA HPS to FPGA Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c
new file mode 100644
index 0000000000..9dc2639300
--- /dev/null
+++ b/drivers/fpga/altera-pr-ip-core-plat.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Altera Partial Reconfiguration IP Core
+ *
+ * Copyright (C) 2016-2017 Intel Corporation
+ *
+ * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
+ * by Alan Tull <atull@opensource.altera.com>
+ */
+#include <linux/fpga/altera-pr-ip-core.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+static int alt_pr_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *reg_base;
+
+ /* First mmio base is for register access */
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ return alt_pr_register(dev, reg_base);
+}
+
+static const struct of_device_id alt_pr_of_match[] = {
+ { .compatible = "altr,a10-pr-ip", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, alt_pr_of_match);
+
+static struct platform_driver alt_pr_platform_driver = {
+ .probe = alt_pr_platform_probe,
+ .driver = {
+ .name = "alt_a10_pr_ip",
+ .of_match_table = alt_pr_of_match,
+ },
+};
+
+module_platform_driver(alt_pr_platform_driver);
+MODULE_AUTHOR("Matthew Gerlach <matthew.gerlach@linux.intel.com>");
+MODULE_DESCRIPTION("Altera Partial Reconfiguration IP Platform Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
new file mode 100644
index 0000000000..df8671af4a
--- /dev/null
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Altera Partial Reconfiguration IP Core
+ *
+ * Copyright (C) 2016-2017 Intel Corporation
+ *
+ * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
+ * by Alan Tull <atull@opensource.altera.com>
+ */
+#include <linux/delay.h>
+#include <linux/fpga/altera-pr-ip-core.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+
+#define ALT_PR_DATA_OFST 0x00
+#define ALT_PR_CSR_OFST 0x04
+
+#define ALT_PR_CSR_PR_START BIT(0)
+#define ALT_PR_CSR_STATUS_SFT 2
+#define ALT_PR_CSR_STATUS_MSK (7 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_NRESET (0 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_PR_ERR (1 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_CRC_ERR (2 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_BAD_BITS (3 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_PR_IN_PROG (4 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_PR_SUCCESS (5 << ALT_PR_CSR_STATUS_SFT)
+
+struct alt_pr_priv {
+ void __iomem *reg_base;
+};
+
+static enum fpga_mgr_states alt_pr_fpga_state(struct fpga_manager *mgr)
+{
+ struct alt_pr_priv *priv = mgr->priv;
+ const char *err = "unknown";
+ enum fpga_mgr_states ret = FPGA_MGR_STATE_UNKNOWN;
+ u32 val;
+
+ val = readl(priv->reg_base + ALT_PR_CSR_OFST);
+
+ val &= ALT_PR_CSR_STATUS_MSK;
+
+ switch (val) {
+ case ALT_PR_CSR_STATUS_NRESET:
+ return FPGA_MGR_STATE_RESET;
+
+ case ALT_PR_CSR_STATUS_PR_ERR:
+ err = "pr error";
+ ret = FPGA_MGR_STATE_WRITE_ERR;
+ break;
+
+ case ALT_PR_CSR_STATUS_CRC_ERR:
+ err = "crc error";
+ ret = FPGA_MGR_STATE_WRITE_ERR;
+ break;
+
+ case ALT_PR_CSR_STATUS_BAD_BITS:
+ err = "bad bits";
+ ret = FPGA_MGR_STATE_WRITE_ERR;
+ break;
+
+ case ALT_PR_CSR_STATUS_PR_IN_PROG:
+ return FPGA_MGR_STATE_WRITE;
+
+ case ALT_PR_CSR_STATUS_PR_SUCCESS:
+ return FPGA_MGR_STATE_OPERATING;
+
+ default:
+ break;
+ }
+
+ dev_err(&mgr->dev, "encountered error code %d (%s) in %s()\n",
+ val, err, __func__);
+ return ret;
+}
+
+static int alt_pr_fpga_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct alt_pr_priv *priv = mgr->priv;
+ u32 val;
+
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ dev_err(&mgr->dev, "%s Partial Reconfiguration flag not set\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ val = readl(priv->reg_base + ALT_PR_CSR_OFST);
+
+ if (val & ALT_PR_CSR_PR_START) {
+ dev_err(&mgr->dev,
+ "%s Partial Reconfiguration already started\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ writel(val | ALT_PR_CSR_PR_START, priv->reg_base + ALT_PR_CSR_OFST);
+
+ return 0;
+}
+
+static int alt_pr_fpga_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct alt_pr_priv *priv = mgr->priv;
+ u32 *buffer_32 = (u32 *)buf;
+ size_t i = 0;
+
+ if (!count)
+ return -EINVAL;
+
+ /* Write out the complete 32-bit chunks */
+ while (count >= sizeof(u32)) {
+ writel(buffer_32[i++], priv->reg_base);
+ count -= sizeof(u32);
+ }
+
+	/* Write out any remaining partial 32-bit chunk */
+ switch (count) {
+ case 3:
+ writel(buffer_32[i++] & 0x00ffffff, priv->reg_base);
+ break;
+ case 2:
+ writel(buffer_32[i++] & 0x0000ffff, priv->reg_base);
+ break;
+ case 1:
+ writel(buffer_32[i++] & 0x000000ff, priv->reg_base);
+ break;
+ case 0:
+ break;
+ default:
+ /* This will never happen */
+ return -EFAULT;
+ }
+
+ if (alt_pr_fpga_state(mgr) == FPGA_MGR_STATE_WRITE_ERR)
+ return -EIO;
+
+ return 0;
+}
+
+static int alt_pr_fpga_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ u32 i = 0;
+
+ do {
+ switch (alt_pr_fpga_state(mgr)) {
+ case FPGA_MGR_STATE_WRITE_ERR:
+ return -EIO;
+
+ case FPGA_MGR_STATE_OPERATING:
+ dev_info(&mgr->dev,
+ "successful partial reconfiguration\n");
+ return 0;
+
+ default:
+ break;
+ }
+ udelay(1);
+ } while (info->config_complete_timeout_us > i++);
+
+ dev_err(&mgr->dev, "timed out waiting for write to complete\n");
+ return -ETIMEDOUT;
+}
+
+static const struct fpga_manager_ops alt_pr_ops = {
+ .state = alt_pr_fpga_state,
+ .write_init = alt_pr_fpga_write_init,
+ .write = alt_pr_fpga_write,
+ .write_complete = alt_pr_fpga_write_complete,
+};
+
+int alt_pr_register(struct device *dev, void __iomem *reg_base)
+{
+ struct alt_pr_priv *priv;
+ struct fpga_manager *mgr;
+ u32 val;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->reg_base = reg_base;
+
+ val = readl(priv->reg_base + ALT_PR_CSR_OFST);
+
+ dev_dbg(dev, "%s status=%d start=%d\n", __func__,
+ (val & ALT_PR_CSR_STATUS_MSK) >> ALT_PR_CSR_STATUS_SFT,
+ (int)(val & ALT_PR_CSR_PR_START));
+
+ mgr = devm_fpga_mgr_register(dev, dev_name(dev), &alt_pr_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+EXPORT_SYMBOL_GPL(alt_pr_register);
+
+MODULE_AUTHOR("Matthew Gerlach <matthew.gerlach@linux.intel.com>");
+MODULE_DESCRIPTION("Altera Partial Reconfiguration IP Core");
+MODULE_LICENSE("GPL v2");
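alt_pr_register() is exported so that consumers other than the platform wrapper above can reuse the core whenever they know where the PR IP's DATA/CSR registers live. A minimal sketch of such a consumer follows; the my_pr_attach() helper and the SZ_16 window are illustrative assumptions, not part of this patch.

/*
 * Hypothetical consumer sketch: map the PR IP's register window and hand
 * it to the core, exactly as altera-pr-ip-core-plat.c does above.
 */
#include <linux/device.h>
#include <linux/fpga/altera-pr-ip-core.h>
#include <linux/io.h>
#include <linux/sizes.h>

static int my_pr_attach(struct device *dev, resource_size_t start)
{
	void __iomem *base;

	/* DATA register at 0x00, CSR at 0x04 */
	base = devm_ioremap(dev, start, SZ_16);
	if (!base)
		return -ENOMEM;

	return alt_pr_register(dev, base);
}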
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
new file mode 100644
index 0000000000..5e1e009dba
--- /dev/null
+++ b/drivers/fpga/altera-ps-spi.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Altera Passive Serial SPI Driver
+ *
+ * Copyright (c) 2017 United Western Technologies, Corporation
+ *
+ * Joshua Clayton <stillcompiling@gmail.com>
+ *
+ * Manage Altera FPGA firmware that is loaded over SPI using the passive
+ * serial configuration method.
+ * Firmware must be in binary "rbf" format.
+ * Works on Arria 10, Cyclone V and Stratix V. Should work on other
+ * Cyclone series devices and may work on other Altera FPGAs.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+enum altera_ps_devtype {
+ CYCLONE5,
+ ARRIA10,
+};
+
+struct altera_ps_data {
+ enum altera_ps_devtype devtype;
+ int status_wait_min_us;
+ int status_wait_max_us;
+ int t_cfg_us;
+ int t_st2ck_us;
+};
+
+struct altera_ps_conf {
+ struct gpio_desc *config;
+ struct gpio_desc *confd;
+ struct gpio_desc *status;
+ struct spi_device *spi;
+ const struct altera_ps_data *data;
+ u32 info_flags;
+ char mgr_name[64];
+};
+
+/* | Arria 10 | Cyclone5 | Stratix5 |
+ * t_CF2ST0 | [; 600] | [; 600] | [; 600] |ns
+ * t_CFG | [2;] | [2;] | [2;] |µs
+ * t_STATUS | [268; 3000] | [268; 1506] | [268; 1506] |µs
+ * t_CF2ST1 | [; 3000] | [; 1506] | [; 1506] |µs
+ * t_CF2CK | [3010;] | [1506;] | [1506;] |µs
+ * t_ST2CK | [10;] | [2;] | [2;] |µs
+ * t_CD2UM | [175; 830] | [175; 437] | [175; 437] |µs
+ */
+static struct altera_ps_data c5_data = {
+ /* these values for Cyclone5 are compatible with Stratix5 */
+ .devtype = CYCLONE5,
+ .status_wait_min_us = 268,
+ .status_wait_max_us = 1506,
+ .t_cfg_us = 2,
+ .t_st2ck_us = 2,
+};
+
+static struct altera_ps_data a10_data = {
+ .devtype = ARRIA10,
+ .status_wait_min_us = 268, /* min(t_STATUS) */
+ .status_wait_max_us = 3000, /* max(t_CF2ST1) */
+ .t_cfg_us = 2, /* max { min(t_CFG), max(tCF2ST0) } */
+ .t_st2ck_us = 10, /* min(t_ST2CK) */
+};
+
+/* Array index is enum altera_ps_devtype */
+static const struct altera_ps_data *altera_ps_data_map[] = {
+ &c5_data,
+ &a10_data,
+};
+
+static const struct of_device_id of_ef_match[] = {
+ { .compatible = "altr,fpga-passive-serial", .data = &c5_data },
+ { .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_ef_match);
+
+static enum fpga_mgr_states altera_ps_state(struct fpga_manager *mgr)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+
+ if (gpiod_get_value_cansleep(conf->status))
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static inline void altera_ps_delay(int delay_us)
+{
+ if (delay_us > 10)
+ usleep_range(delay_us, delay_us + 5);
+ else
+ udelay(delay_us);
+}
+
+static int altera_ps_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ int min, max, waits;
+ int i;
+
+ conf->info_flags = info->flags;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ return -EINVAL;
+ }
+
+ gpiod_set_value_cansleep(conf->config, 1);
+
+ /* wait min reset pulse time */
+ altera_ps_delay(conf->data->t_cfg_us);
+
+ if (!gpiod_get_value_cansleep(conf->status)) {
+ dev_err(&mgr->dev, "Status pin failed to show a reset\n");
+ return -EIO;
+ }
+
+ gpiod_set_value_cansleep(conf->config, 0);
+
+ min = conf->data->status_wait_min_us;
+ max = conf->data->status_wait_max_us;
+ waits = max / min;
+ if (max % min)
+ waits++;
+
+ /* wait for max { max(t_STATUS), max(t_CF2ST1) } */
+ for (i = 0; i < waits; i++) {
+ usleep_range(min, min + 10);
+ if (!gpiod_get_value_cansleep(conf->status)) {
+			/* wait for min(t_ST2CK) */
+ altera_ps_delay(conf->data->t_st2ck_us);
+ return 0;
+ }
+ }
+
+ dev_err(&mgr->dev, "Status pin not ready.\n");
+ return -EIO;
+}
+
+static void rev_buf(char *buf, size_t len)
+{
+ u32 *fw32 = (u32 *)buf;
+ size_t extra_bytes = (len & 0x03);
+ const u32 *fw_end = (u32 *)(buf + len - extra_bytes);
+
+ /* set buffer to lsb first */
+ while (fw32 < fw_end) {
+ *fw32 = bitrev8x4(*fw32);
+ fw32++;
+ }
+
+ if (extra_bytes) {
+ buf = (char *)fw_end;
+ while (extra_bytes) {
+ *buf = bitrev8(*buf);
+ buf++;
+ extra_bytes--;
+ }
+ }
+}
+
+static int altera_ps_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ const char *fw_data = buf;
+ const char *fw_data_end = fw_data + count;
+
+ while (fw_data < fw_data_end) {
+ int ret;
+ size_t stride = min_t(size_t, fw_data_end - fw_data, SZ_4K);
+
+ if (!(conf->info_flags & FPGA_MGR_BITSTREAM_LSB_FIRST))
+ rev_buf((char *)fw_data, stride);
+
+ ret = spi_write(conf->spi, fw_data, stride);
+ if (ret) {
+ dev_err(&mgr->dev, "spi error in firmware write: %d\n",
+ ret);
+ return ret;
+ }
+ fw_data += stride;
+ }
+
+ return 0;
+}
+
+static int altera_ps_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ static const char dummy[] = {0};
+ int ret;
+
+ if (gpiod_get_value_cansleep(conf->status)) {
+ dev_err(&mgr->dev, "Error during configuration.\n");
+ return -EIO;
+ }
+
+ if (conf->confd) {
+ if (!gpiod_get_raw_value_cansleep(conf->confd)) {
+ dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * After CONF_DONE goes high, send two additional falling edges on DCLK
+ * to begin initialization and enter user mode
+ */
+ ret = spi_write(conf->spi, dummy, 1);
+ if (ret) {
+ dev_err(&mgr->dev, "spi error during end sequence: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct fpga_manager_ops altera_ps_ops = {
+ .state = altera_ps_state,
+ .write_init = altera_ps_write_init,
+ .write = altera_ps_write,
+ .write_complete = altera_ps_write_complete,
+};
+
+static const struct altera_ps_data *id_to_data(const struct spi_device_id *id)
+{
+ kernel_ulong_t devtype = id->driver_data;
+ const struct altera_ps_data *data;
+
+	/* someone added an altera_ps_devtype without adding it to the map array */
+ if (devtype >= ARRAY_SIZE(altera_ps_data_map))
+ return NULL;
+
+ data = altera_ps_data_map[devtype];
+ if (!data || data->devtype != devtype)
+ return NULL;
+
+ return data;
+}
+
+static int altera_ps_probe(struct spi_device *spi)
+{
+ struct altera_ps_conf *conf;
+ const struct of_device_id *of_id;
+ struct fpga_manager *mgr;
+
+ conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ if (spi->dev.of_node) {
+ of_id = of_match_device(of_ef_match, &spi->dev);
+ if (!of_id)
+ return -ENODEV;
+ conf->data = of_id->data;
+ } else {
+ conf->data = id_to_data(spi_get_device_id(spi));
+ if (!conf->data)
+ return -ENODEV;
+ }
+
+ conf->spi = spi;
+ conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
+ if (IS_ERR(conf->config)) {
+ dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
+ PTR_ERR(conf->config));
+ return PTR_ERR(conf->config);
+ }
+
+ conf->status = devm_gpiod_get(&spi->dev, "nstat", GPIOD_IN);
+ if (IS_ERR(conf->status)) {
+ dev_err(&spi->dev, "Failed to get status gpio: %ld\n",
+ PTR_ERR(conf->status));
+ return PTR_ERR(conf->status);
+ }
+
+ conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
+ if (IS_ERR(conf->confd)) {
+ dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
+ PTR_ERR(conf->confd));
+ return PTR_ERR(conf->confd);
+ } else if (!conf->confd) {
+		dev_warn(&spi->dev, "Not using confd gpio\n");
+ }
+
+ /* Register manager with unique name */
+ snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s",
+ dev_driver_string(&spi->dev), dev_name(&spi->dev));
+
+ mgr = devm_fpga_mgr_register(&spi->dev, conf->mgr_name,
+ &altera_ps_ops, conf);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+static const struct spi_device_id altera_ps_spi_ids[] = {
+ { "cyclone-ps-spi", CYCLONE5 },
+ { "fpga-passive-serial", CYCLONE5 },
+ { "fpga-arria10-passive-serial", ARRIA10 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids);
+
+static struct spi_driver altera_ps_driver = {
+ .driver = {
+ .name = "altera-ps-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_ef_match),
+ },
+ .id_table = altera_ps_spi_ids,
+ .probe = altera_ps_probe,
+};
+
+module_spi_driver(altera_ps_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joshua Clayton <stillcompiling@gmail.com>");
+MODULE_DESCRIPTION("Module to load Altera FPGA firmware over SPI");
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
new file mode 100644
index 0000000000..02b60fde04
--- /dev/null
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+
+#include "dfl-afu.h"
+
+void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+
+ afu->dma_regions = RB_ROOT;
+}
+
+/**
+ * afu_dma_pin_pages - pin pages of given dma memory region
+ * @pdata: feature device platform data
+ * @region: dma memory region to be pinned
+ *
+ * Pin all the pages of given dfl_afu_dma_region.
+ * Return 0 for success or negative error code.
+ */
+static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ int npages = region->length >> PAGE_SHIFT;
+ struct device *dev = &pdata->dev->dev;
+ int ret, pinned;
+
+ ret = account_locked_vm(current->mm, npages, true);
+ if (ret)
+ return ret;
+
+ region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!region->pages) {
+ ret = -ENOMEM;
+ goto unlock_vm;
+ }
+
+ pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
+ region->pages);
+ if (pinned < 0) {
+ ret = pinned;
+ goto free_pages;
+ } else if (pinned != npages) {
+ ret = -EFAULT;
+ goto unpin_pages;
+ }
+
+ dev_dbg(dev, "%d pages pinned\n", pinned);
+
+ return 0;
+
+unpin_pages:
+ unpin_user_pages(region->pages, pinned);
+free_pages:
+ kfree(region->pages);
+unlock_vm:
+ account_locked_vm(current->mm, npages, false);
+ return ret;
+}
+
+/**
+ * afu_dma_unpin_pages - unpin pages of given dma memory region
+ * @pdata: feature device platform data
+ * @region: dma memory region to be unpinned
+ *
+ * Unpin and free all the pages of the given dfl_afu_dma_region and release
+ * the locked-vm accounting taken when they were pinned.
+ */
+static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ long npages = region->length >> PAGE_SHIFT;
+ struct device *dev = &pdata->dev->dev;
+
+ unpin_user_pages(region->pages, npages);
+ kfree(region->pages);
+ account_locked_vm(current->mm, npages, false);
+
+ dev_dbg(dev, "%ld pages unpinned\n", npages);
+}
+
+/**
+ * afu_dma_check_continuous_pages - check if pages are physically contiguous
+ * @region: dma memory region
+ *
+ * Return true if the pages of the given dma memory region have contiguous
+ * physical addresses, otherwise return false.
+ */
+static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
+{
+ int npages = region->length >> PAGE_SHIFT;
+ int i;
+
+ for (i = 0; i < npages - 1; i++)
+ if (page_to_pfn(region->pages[i]) + 1 !=
+ page_to_pfn(region->pages[i + 1]))
+ return false;
+
+ return true;
+}
+
+/**
+ * dma_region_check_iova - check if memory area is fully contained in the region
+ * @region: dma memory region
+ * @iova: address of the dma memory area
+ * @size: size of the dma memory area
+ *
+ * Compare the dma memory area defined by @iova and @size with given dma region.
+ * Return true if memory area is fully contained in the region, otherwise false.
+ */
+static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
+ u64 iova, u64 size)
+{
+ if (!size && region->iova != iova)
+ return false;
+
+ return (region->iova <= iova) &&
+ (region->length + region->iova >= iova + size);
+}
+
+/**
+ * afu_dma_region_add - add given dma region to rbtree
+ * @pdata: feature device platform data
+ * @region: dma region to be added
+ *
+ * Return 0 for success, -EEXIST if the dma region overlaps an existing one.
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node **new, *parent = NULL;
+
+ dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ new = &afu->dma_regions.rb_node;
+
+ while (*new) {
+ struct dfl_afu_dma_region *this;
+
+ this = container_of(*new, struct dfl_afu_dma_region, node);
+
+ parent = *new;
+
+ if (dma_region_check_iova(this, region->iova, region->length))
+ return -EEXIST;
+
+ if (region->iova < this->iova)
+ new = &((*new)->rb_left);
+ else if (region->iova > this->iova)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&region->node, parent, new);
+ rb_insert_color(&region->node, &afu->dma_regions);
+
+ return 0;
+}
+
+/**
+ * afu_dma_region_remove - remove given dma region from rbtree
+ * @pdata: feature device platform data
+ * @region: dma region to be removed
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ struct dfl_afu *afu;
+
+ dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ afu = dfl_fpga_pdata_get_private(pdata);
+ rb_erase(&region->node, &afu->dma_regions);
+}
+
+/**
+ * afu_dma_region_destroy - destroy all regions in rbtree
+ * @pdata: feature device platform data
+ *
+ * Needs to be called with pdata->lock held.
+ */
+void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node *node = rb_first(&afu->dma_regions);
+ struct dfl_afu_dma_region *region;
+
+ while (node) {
+ region = container_of(node, struct dfl_afu_dma_region, node);
+
+ dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ rb_erase(node, &afu->dma_regions);
+
+ if (region->iova)
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length,
+ DMA_BIDIRECTIONAL);
+
+ if (region->pages)
+ afu_dma_unpin_pages(pdata, region);
+
+ node = rb_next(node);
+ kfree(region);
+ }
+}
+
+/**
+ * afu_dma_region_find - find the dma region from rbtree based on iova and size
+ * @pdata: feature device platform data
+ * @iova: address of the dma memory area
+ * @size: size of the dma memory area
+ *
+ * It finds the dma region from the rbtree based on @iova and @size:
+ * - if @size == 0, it finds the dma region which starts from @iova
+ * - otherwise, it finds the dma region which fully contains
+ * [@iova, @iova+size)
+ * If nothing matches, it returns NULL.
+ *
+ * Needs to be called with pdata->lock held.
+ */
+struct dfl_afu_dma_region *
+afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node *node = afu->dma_regions.rb_node;
+ struct device *dev = &pdata->dev->dev;
+
+ while (node) {
+ struct dfl_afu_dma_region *region;
+
+ region = container_of(node, struct dfl_afu_dma_region, node);
+
+ if (dma_region_check_iova(region, iova, size)) {
+ dev_dbg(dev, "find region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+ return region;
+ }
+
+ if (iova < region->iova)
+ node = node->rb_left;
+ else if (iova > region->iova)
+ node = node->rb_right;
+ else
+ /* the iova region is not fully covered. */
+ break;
+ }
+
+ dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
+ (unsigned long long)iova, (unsigned long long)size);
+
+ return NULL;
+}
+
+/**
+ * afu_dma_region_find_iova - find the dma region from rbtree by iova
+ * @pdata: feature device platform data
+ * @iova: address of the dma region
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static struct dfl_afu_dma_region *
+afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
+{
+ return afu_dma_region_find(pdata, iova, 0);
+}
+
+/**
+ * afu_dma_map_region - map memory region for dma
+ * @pdata: feature device platform data
+ * @user_addr: address of the memory region
+ * @length: size of the memory region
+ * @iova: pointer of iova address
+ *
+ * Map memory region defined by @user_addr and @length, and return dma address
+ * of the memory region via @iova.
+ * Return 0 for success, otherwise error code.
+ */
+int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+ u64 user_addr, u64 length, u64 *iova)
+{
+ struct dfl_afu_dma_region *region;
+ int ret;
+
+ /*
+	 * Check inputs: only accept a page-aligned user memory region with
+	 * a valid length.
+ */
+ if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
+ return -EINVAL;
+
+ /* Check overflow */
+ if (user_addr + length < user_addr)
+ return -EINVAL;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->user_addr = user_addr;
+ region->length = length;
+
+ /* Pin the user memory region */
+ ret = afu_dma_pin_pages(pdata, region);
+ if (ret) {
+ dev_err(&pdata->dev->dev, "failed to pin memory region\n");
+ goto free_region;
+ }
+
+	/* Only accept physically contiguous pages; return an error otherwise */
+ if (!afu_dma_check_continuous_pages(region)) {
+ dev_err(&pdata->dev->dev, "pages are not continuous\n");
+ ret = -EINVAL;
+ goto unpin_pages;
+ }
+
+	/* Pages are contiguous, so map the whole region in a single call */
+ region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
+ region->pages[0], 0,
+ region->length,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
+ dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ ret = -EFAULT;
+ goto unpin_pages;
+ }
+
+ *iova = region->iova;
+
+ mutex_lock(&pdata->lock);
+ ret = afu_dma_region_add(pdata, region);
+ mutex_unlock(&pdata->lock);
+ if (ret) {
+ dev_err(&pdata->dev->dev, "failed to add dma region\n");
+ goto unmap_dma;
+ }
+
+ return 0;
+
+unmap_dma:
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length, DMA_BIDIRECTIONAL);
+unpin_pages:
+ afu_dma_unpin_pages(pdata, region);
+free_region:
+ kfree(region);
+ return ret;
+}
+
+/**
+ * afu_dma_unmap_region - unmap dma memory region
+ * @pdata: feature device platform data
+ * @iova: dma address of the region
+ *
+ * Unmap dma memory region based on @iova.
+ * Return 0 for success, otherwise error code.
+ */
+int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
+{
+ struct dfl_afu_dma_region *region;
+
+ mutex_lock(&pdata->lock);
+ region = afu_dma_region_find_iova(pdata, iova);
+ if (!region) {
+ mutex_unlock(&pdata->lock);
+ return -EINVAL;
+ }
+
+ if (region->in_use) {
+ mutex_unlock(&pdata->lock);
+ return -EBUSY;
+ }
+
+ afu_dma_region_remove(pdata, region);
+ mutex_unlock(&pdata->lock);
+
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length, DMA_BIDIRECTIONAL);
+ afu_dma_unpin_pages(pdata, region);
+ kfree(region);
+
+ return 0;
+}
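From userspace, this region management is exercised through the DFL_FPGA_PORT_DMA_MAP/UNMAP ioctls dispatched in dfl-afu-main.c below. A minimal sketch, assuming a port node such as /dev/dfl-port.0 (the path is an assumption); the buffer must be page-aligned and, per afu_dma_map_region() above, physically contiguous once pinned, so a single page is the safe illustration.

/* Userspace sketch: map one page for AFU DMA and print its iova */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	struct dfl_fpga_port_dma_map map;
	struct dfl_fpga_port_dma_unmap unmap;
	void *buf;
	int fd;

	fd = open("/dev/dfl-port.0", O_RDWR);	/* device path is an assumption */
	if (fd < 0)
		return 1;

	buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		close(fd);
		return 1;
	}

	memset(&map, 0, sizeof(map));
	map.argsz = sizeof(map);
	map.user_addr = (uintptr_t)buf;
	map.length = 4096;

	if (ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map) == 0) {
		printf("iova: 0x%llx\n", (unsigned long long)map.iova);

		memset(&unmap, 0, sizeof(unmap));
		unmap.argsz = sizeof(unmap);
		unmap.iova = map.iova;
		ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
	}

	close(fd);
	return 0;
}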
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
new file mode 100644
index 0000000000..ab7be62173
--- /dev/null
+++ b/drivers/fpga/dfl-afu-error.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@linux.intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/fpga-dfl.h>
+#include <linux/uaccess.h>
+
+#include "dfl-afu.h"
+
+#define PORT_ERROR_MASK 0x8
+#define PORT_ERROR 0x10
+#define PORT_FIRST_ERROR 0x18
+#define PORT_MALFORMED_REQ0 0x20
+#define PORT_MALFORMED_REQ1 0x28
+
+#define ERROR_MASK GENMASK_ULL(63, 0)
+
+/* mask or unmask port errors by the error mask register. */
+static void __afu_port_err_mask(struct device *dev, bool mask)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
+}
+
+static void afu_port_err_mask(struct device *dev, bool mask)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+
+ mutex_lock(&pdata->lock);
+ __afu_port_err_mask(dev, mask);
+ mutex_unlock(&pdata->lock);
+}
+
+/* clear port errors. */
+static int afu_port_err_clear(struct device *dev, u64 err)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ void __iomem *base_err, *base_hdr;
+ int enable_ret = 0, ret = -EBUSY;
+ u64 v;
+
+ base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+
+ /*
+	 * Clear port errors:
+	 *
+	 * - Check for the AP6 state
+	 * - Halt the port by keeping it in reset
+	 * - Set the port error mask to all 1s to mask errors
+	 * - Clear all errors
+	 * - Set the port error mask to all 0s so new errors are captured
+	 * - Enable the port by pulling it out of reset
+ */
+
+	/* if the device is still in the AP6 power state, no errors can be cleared. */
+ v = readq(base_hdr + PORT_HDR_STS);
+ if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
+ dev_err(dev, "Could not clear errors, device in AP6 state.\n");
+ goto done;
+ }
+
+ /* Halt Port by keeping Port in reset */
+ ret = __afu_port_disable(pdev);
+ if (ret)
+ goto done;
+
+ /* Mask all errors */
+ __afu_port_err_mask(dev, true);
+
+	/* Clear errors if the err input matches the current port errors. */
+ v = readq(base_err + PORT_ERROR);
+
+ if (v == err) {
+ writeq(v, base_err + PORT_ERROR);
+
+ v = readq(base_err + PORT_FIRST_ERROR);
+ writeq(v, base_err + PORT_FIRST_ERROR);
+ } else {
+ dev_warn(dev, "%s: received 0x%llx, expected 0x%llx\n",
+ __func__, v, err);
+ ret = -EINVAL;
+ }
+
+ /* Clear mask */
+ __afu_port_err_mask(dev, false);
+
+ /* Enable the Port by clearing the reset */
+ enable_ret = __afu_port_enable(pdev);
+
+done:
+ mutex_unlock(&pdata->lock);
+ return enable_ret ? enable_ret : ret;
+}
+
+static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 error;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ mutex_lock(&pdata->lock);
+ error = readq(base + PORT_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)error);
+}
+
+static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ u64 value;
+ int ret;
+
+ if (kstrtou64(buff, 0, &value))
+ return -EINVAL;
+
+ ret = afu_port_err_clear(dev, value);
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(errors);
+
+static ssize_t first_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 error;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ mutex_lock(&pdata->lock);
+ error = readq(base + PORT_FIRST_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)error);
+}
+static DEVICE_ATTR_RO(first_error);
+
+static ssize_t first_malformed_req_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 req0, req1;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ mutex_lock(&pdata->lock);
+ req0 = readq(base + PORT_MALFORMED_REQ0);
+ req1 = readq(base + PORT_MALFORMED_REQ1);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%016llx%016llx\n",
+ (unsigned long long)req1, (unsigned long long)req0);
+}
+static DEVICE_ATTR_RO(first_malformed_req);
+
+static struct attribute *port_err_attrs[] = {
+ &dev_attr_errors.attr,
+ &dev_attr_first_error.attr,
+ &dev_attr_first_malformed_req.attr,
+ NULL,
+};
+
+static umode_t port_err_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ /*
+	 * sysfs entries are visible only if the related private feature is
+ * enumerated.
+ */
+ if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
+ return 0;
+
+ return attr->mode;
+}
+
+const struct attribute_group port_err_group = {
+ .name = "errors",
+ .attrs = port_err_attrs,
+ .is_visible = port_err_attrs_visible,
+};
+
+static int port_err_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ afu_port_err_mask(&pdev->dev, false);
+
+ return 0;
+}
+
+static void port_err_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ afu_port_err_mask(&pdev->dev, true);
+}
+
+static long
+port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
+ return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+ case DFL_FPGA_PORT_ERR_SET_IRQ:
+ return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+ default:
+		dev_dbg(&pdev->dev, "%x cmd not handled\n", cmd);
+ return -ENODEV;
+ }
+}
+
+const struct dfl_feature_id port_err_id_table[] = {
+ {.id = PORT_FEATURE_ID_ERROR,},
+ {0,}
+};
+
+const struct dfl_feature_ops port_err_ops = {
+ .init = port_err_init,
+ .uinit = port_err_uinit,
+ .ioctl = port_err_ioctl,
+};
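The clear protocol enforced by afu_port_err_clear() is visible to userspace through the "errors" attribute group registered above: read the current error value, then write that very same value back; anything else is rejected with -EINVAL. A sketch in C follows; the sysfs path depends on the actual port device name and is therefore an assumption.

/* Userspace sketch: clear port errors by echoing back the current value */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/platform/devices/dfl-port.0/errors/errors";
	char val[32];
	FILE *f;

	f = fopen(path, "r");
	if (!f)
		return 1;
	if (!fgets(val, sizeof(val), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs(val, f);	/* must match the value just read */
	fclose(f);
	return 0;
}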
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
new file mode 100644
index 0000000000..7f621e96d3
--- /dev/null
+++ b/drivers/fpga/dfl-afu-main.c
@@ -0,0 +1,988 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl-afu.h"
+
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
+/**
+ * __afu_port_enable - enable a port by clearing reset
+ * @pdev: port platform device.
+ *
+ * Enable the port by clearing its soft reset bit, which is set by default.
+ * The AFU is unable to respond to any MMIO access while in reset.
+ * __afu_port_enable() should only be called after __afu_port_disable().
+ *
+ * The caller needs to hold the lock for protection.
+ */
+int __afu_port_enable(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ u64 v;
+
+ WARN_ON(!pdata->disable_count);
+
+ if (--pdata->disable_count != 0)
+ return 0;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ /* Clear port soft reset */
+ v = readq(base + PORT_HDR_CTRL);
+ v &= ~PORT_CTRL_SFTRST;
+ writeq(v, base + PORT_HDR_CTRL);
+
+ /*
+ * HW clears the ack bit to indicate that the port is fully out
+ * of reset.
+ */
+ if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
+ !(v & PORT_CTRL_SFTRST_ACK),
+ RST_POLL_INVL, RST_POLL_TIMEOUT)) {
+ dev_err(&pdev->dev, "timeout, failure to enable device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * __afu_port_disable - disable a port by holding reset
+ * @pdev: port platform device.
+ *
+ * Disable the port by setting its soft reset bit; this puts the port into
+ * reset.
+ *
+ * The caller needs to hold the lock for protection.
+ */
+int __afu_port_disable(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ u64 v;
+
+ if (pdata->disable_count++ != 0)
+ return 0;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ /* Set port soft reset */
+ v = readq(base + PORT_HDR_CTRL);
+ v |= PORT_CTRL_SFTRST;
+ writeq(v, base + PORT_HDR_CTRL);
+
+ /*
+	 * HW sets the ack bit to 1 when all outstanding requests have been
+	 * drained on this port and the minimum soft reset pulse width has
+	 * elapsed. The driver polls port_soft_reset_ack to determine if the
+	 * reset is done by HW.
+ */
+ if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
+ v & PORT_CTRL_SFTRST_ACK,
+ RST_POLL_INVL, RST_POLL_TIMEOUT)) {
+ dev_err(&pdev->dev, "timeout, failure to disable device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * This function resets the FPGA port and its accelerator (AFU) via
+ * __afu_port_disable() and __afu_port_enable() (set the port soft reset bit
+ * and then clear it). Userspace can reset the port at any time, e.g. during
+ * DMA or Partial Reconfiguration. This should never cause any system-level
+ * issue, only a functional failure (e.g. a DMA or PR operation failure)
+ * that is recoverable.
+ *
+ * Note: the accelerator (AFU) is not accessible when its port is in reset
+ * (disabled). Any attempt at MMIO access to the AFU while in reset will
+ * result in errors reported via the port error reporting sub-feature (if
+ * present).
+ */
+static int __port_reset(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = __afu_port_disable(pdev);
+ if (ret)
+ return ret;
+
+ return __afu_port_enable(pdev);
+}
+
+static int port_reset(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ mutex_lock(&pdata->lock);
+ ret = __port_reset(pdev);
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static int port_get_id(struct platform_device *pdev)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
+}
+
+static ssize_t
+id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int id = port_get_id(to_platform_device(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", id);
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t
+ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_CTRL);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
+}
+
+static ssize_t
+ltr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ bool ltr;
+ u64 v;
+
+ if (kstrtobool(buf, &ltr))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_CTRL);
+ v &= ~PORT_CTRL_LATENCY;
+ v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
+ writeq(v, base + PORT_HDR_CTRL);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(ltr);
+
+static ssize_t
+ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
+}
+
+static ssize_t
+ap1_event_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ bool clear;
+
+ if (kstrtobool(buf, &clear) || !clear)
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(ap1_event);
+
+static ssize_t
+ap2_event_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
+}
+
+static ssize_t
+ap2_event_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ bool clear;
+
+ if (kstrtobool(buf, &clear) || !clear)
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(ap2_event);
+
+static ssize_t
+power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
+}
+static DEVICE_ATTR_RO(power_state);
+
+static ssize_t
+userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freq_cmd;
+ void __iomem *base;
+
+ if (kstrtou64(buf, 0, &userclk_freq_cmd))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(userclk_freqcmd);
+
+static ssize_t
+userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freqcntr_cmd;
+ void __iomem *base;
+
+ if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(userclk_freqcntrcmd);
+
+static ssize_t
+userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freqsts;
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
+}
+static DEVICE_ATTR_RO(userclk_freqsts);
+
+static ssize_t
+userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freqcntrsts;
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)userclk_freqcntrsts);
+}
+static DEVICE_ATTR_RO(userclk_freqcntrsts);
+
+static struct attribute *port_hdr_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_ltr.attr,
+ &dev_attr_ap1_event.attr,
+ &dev_attr_ap2_event.attr,
+ &dev_attr_power_state.attr,
+ &dev_attr_userclk_freqcmd.attr,
+ &dev_attr_userclk_freqcntrcmd.attr,
+ &dev_attr_userclk_freqsts.attr,
+ &dev_attr_userclk_freqcntrsts.attr,
+ NULL,
+};
+
+static umode_t port_hdr_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ umode_t mode = attr->mode;
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ if (dfl_feature_revision(base) > 0) {
+ /*
+		 * userclk sysfs interfaces are only visible if the port
+		 * revision is 0, as hardware with revision >0 doesn't
+		 * support them.
+ */
+ if (attr == &dev_attr_userclk_freqcmd.attr ||
+ attr == &dev_attr_userclk_freqcntrcmd.attr ||
+ attr == &dev_attr_userclk_freqsts.attr ||
+ attr == &dev_attr_userclk_freqcntrsts.attr)
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static const struct attribute_group port_hdr_group = {
+ .attrs = port_hdr_attrs,
+ .is_visible = port_hdr_attrs_visible,
+};
+
+static int port_hdr_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ port_reset(pdev);
+
+ return 0;
+}
+
+static long
+port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case DFL_FPGA_PORT_RESET:
+ if (!arg)
+ ret = port_reset(pdev);
+ else
+ ret = -EINVAL;
+ break;
+ default:
+		dev_dbg(&pdev->dev, "%x cmd not handled\n", cmd);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static const struct dfl_feature_id port_hdr_id_table[] = {
+ {.id = PORT_FEATURE_ID_HEADER,},
+ {0,}
+};
+
+static const struct dfl_feature_ops port_hdr_ops = {
+ .init = port_hdr_init,
+ .ioctl = port_hdr_ioctl,
+};
+
+static ssize_t
+afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 guidl, guidh;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
+
+ mutex_lock(&pdata->lock);
+ if (pdata->disable_count) {
+ mutex_unlock(&pdata->lock);
+ return -EBUSY;
+ }
+
+ guidl = readq(base + GUID_L);
+ guidh = readq(base + GUID_H);
+ mutex_unlock(&pdata->lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
+}
+static DEVICE_ATTR_RO(afu_id);
+
+static struct attribute *port_afu_attrs[] = {
+ &dev_attr_afu_id.attr,
+ NULL
+};
+
+static umode_t port_afu_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ /*
+	 * sysfs entries are visible only if the related private feature is
+ * enumerated.
+ */
+ if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group port_afu_group = {
+ .attrs = port_afu_attrs,
+ .is_visible = port_afu_attrs_visible,
+};
+
+static int port_afu_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct resource *res = &pdev->resource[feature->resource_index];
+
+ return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ DFL_PORT_REGION_INDEX_AFU,
+ resource_size(res), res->start,
+ DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
+ DFL_PORT_REGION_WRITE);
+}
+
+static const struct dfl_feature_id port_afu_id_table[] = {
+ {.id = PORT_FEATURE_ID_AFU,},
+ {0,}
+};
+
+static const struct dfl_feature_ops port_afu_ops = {
+ .init = port_afu_init,
+};
+
+static int port_stp_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct resource *res = &pdev->resource[feature->resource_index];
+
+ return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ DFL_PORT_REGION_INDEX_STP,
+ resource_size(res), res->start,
+ DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
+ DFL_PORT_REGION_WRITE);
+}
+
+static const struct dfl_feature_id port_stp_id_table[] = {
+ {.id = PORT_FEATURE_ID_STP,},
+ {0,}
+};
+
+static const struct dfl_feature_ops port_stp_ops = {
+ .init = port_stp_init,
+};
+
+static long
+port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
+ return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+ case DFL_FPGA_PORT_UINT_SET_IRQ:
+ return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+ default:
+		dev_dbg(&pdev->dev, "%x cmd not handled\n", cmd);
+ return -ENODEV;
+ }
+}
+
+static const struct dfl_feature_id port_uint_id_table[] = {
+ {.id = PORT_FEATURE_ID_UINT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops port_uint_ops = {
+ .ioctl = port_uint_ioctl,
+};
+
+static struct dfl_feature_driver port_feature_drvs[] = {
+ {
+ .id_table = port_hdr_id_table,
+ .ops = &port_hdr_ops,
+ },
+ {
+ .id_table = port_afu_id_table,
+ .ops = &port_afu_ops,
+ },
+ {
+ .id_table = port_err_id_table,
+ .ops = &port_err_ops,
+ },
+ {
+ .id_table = port_stp_id_table,
+ .ops = &port_stp_ops,
+ },
+ {
+ .id_table = port_uint_id_table,
+ .ops = &port_uint_ops,
+ },
+ {
+ .ops = NULL,
+ }
+};
+
+static int afu_open(struct inode *inode, struct file *filp)
+{
+ struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
+ struct dfl_feature_platform_data *pdata;
+ int ret;
+
+ pdata = dev_get_platdata(&fdev->dev);
+ if (WARN_ON(!pdata))
+ return -ENODEV;
+
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ if (!ret) {
+ dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+ dfl_feature_dev_use_count(pdata));
+ filp->private_data = fdev;
+ }
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static int afu_release(struct inode *inode, struct file *filp)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+ struct dfl_feature *feature;
+
+ dev_dbg(&pdev->dev, "Device File Release\n");
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ dfl_feature_dev_use_end(pdata);
+
+ if (!dfl_feature_dev_use_count(pdata)) {
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_set_irq_triggers(feature, 0,
+ feature->nr_irqs, NULL);
+ __port_reset(pdev);
+ afu_dma_region_destroy(pdata);
+ }
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ /* No extension support for now */
+ return 0;
+}
+
+static long
+afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_info info;
+ struct dfl_afu *afu;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);
+
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ info.flags = 0;
+ info.num_regions = afu->num_regions;
+ info.num_umsgs = afu->num_umsgs;
+ mutex_unlock(&pdata->lock);
+
+ if (copy_to_user(arg, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
+ void __user *arg)
+{
+ struct dfl_fpga_port_region_info rinfo;
+ struct dfl_afu_mmio_region region;
+ unsigned long minsz;
+ long ret;
+
+ minsz = offsetofend(struct dfl_fpga_port_region_info, offset);
+
+ if (copy_from_user(&rinfo, arg, minsz))
+ return -EFAULT;
+
+ if (rinfo.argsz < minsz || rinfo.padding)
+ return -EINVAL;
+
+ ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
+ if (ret)
+ return ret;
+
+ rinfo.flags = region.flags;
+ rinfo.size = region.size;
+ rinfo.offset = region.offset;
+
+ if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long
+afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_dma_map map;
+ unsigned long minsz;
+ long ret;
+
+ minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);
+
+ if (copy_from_user(&map, arg, minsz))
+ return -EFAULT;
+
+ if (map.argsz < minsz || map.flags)
+ return -EINVAL;
+
+ ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(arg, &map, sizeof(map))) {
+ afu_dma_unmap_region(pdata, map.iova);
+ return -EFAULT;
+ }
+
+ dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
+ (unsigned long long)map.user_addr,
+ (unsigned long long)map.length,
+ (unsigned long long)map.iova);
+
+ return 0;
+}
+
+static long
+afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_dma_unmap unmap;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);
+
+ if (copy_from_user(&unmap, arg, minsz))
+ return -EFAULT;
+
+ if (unmap.argsz < minsz || unmap.flags)
+ return -EINVAL;
+
+ return afu_dma_unmap_region(pdata, unmap.iova);
+}
+
+static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+ struct dfl_feature *f;
+ long ret;
+
+ dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ switch (cmd) {
+ case DFL_FPGA_GET_API_VERSION:
+ return DFL_FPGA_API_VERSION;
+ case DFL_FPGA_CHECK_EXTENSION:
+ return afu_ioctl_check_extension(pdata, arg);
+ case DFL_FPGA_PORT_GET_INFO:
+ return afu_ioctl_get_info(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_GET_REGION_INFO:
+ return afu_ioctl_get_region_info(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_DMA_MAP:
+ return afu_ioctl_dma_map(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_DMA_UNMAP:
+ return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
+ default:
+ /*
+		 * Let the sub-feature's ioctl function handle the cmd.
+		 * A sub-feature's ioctl returns -ENODEV when the cmd is not
+		 * handled by that sub-feature, and returns 0 or another
+		 * error code when it is handled.
+ */
+ dfl_fpga_dev_for_each_feature(pdata, f)
+ if (f->ops && f->ops->ioctl) {
+ ret = f->ops->ioctl(pdev, f, cmd, arg);
+ if (ret != -ENODEV)
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct vm_operations_struct afu_vma_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+ u64 size = vma->vm_end - vma->vm_start;
+ struct dfl_afu_mmio_region region;
+ u64 offset;
+ int ret;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
+ if (ret)
+ return ret;
+
+ if (!(region.flags & DFL_PORT_REGION_MMAP))
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
+ return -EPERM;
+
+ if ((vma->vm_flags & VM_WRITE) &&
+ !(region.flags & DFL_PORT_REGION_WRITE))
+ return -EPERM;
+
+ /* Support debug access to the mapping */
+ vma->vm_ops = &afu_vma_ops;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ return remap_pfn_range(vma, vma->vm_start,
+ (region.phys + (offset - region.offset)) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+}
+
+static const struct file_operations afu_fops = {
+ .owner = THIS_MODULE,
+ .open = afu_open,
+ .release = afu_release,
+ .unlocked_ioctl = afu_ioctl,
+ .mmap = afu_mmap,
+};
+
+static int afu_dev_init(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_afu *afu;
+
+ afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
+ if (!afu)
+ return -ENOMEM;
+
+ afu->pdata = pdata;
+
+ mutex_lock(&pdata->lock);
+ dfl_fpga_pdata_set_private(pdata, afu);
+ afu_mmio_region_init(pdata);
+ afu_dma_region_init(pdata);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static int afu_dev_destroy(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ afu_mmio_region_destroy(pdata);
+ afu_dma_region_destroy(pdata);
+ dfl_fpga_pdata_set_private(pdata, NULL);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static int port_enable_set(struct platform_device *pdev, bool enable)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ mutex_lock(&pdata->lock);
+ if (enable)
+ ret = __afu_port_enable(pdev);
+ else
+ ret = __afu_port_disable(pdev);
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static struct dfl_fpga_port_ops afu_port_ops = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .owner = THIS_MODULE,
+ .get_id = port_get_id,
+ .enable_set = port_enable_set,
+};
+
+static int afu_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ ret = afu_dev_init(pdev);
+ if (ret)
+ goto exit;
+
+ ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
+ if (ret)
+ goto dev_destroy;
+
+ ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
+ if (ret) {
+ dfl_fpga_dev_feature_uinit(pdev);
+ goto dev_destroy;
+ }
+
+ return 0;
+
+dev_destroy:
+ afu_dev_destroy(pdev);
+exit:
+ return ret;
+}
+
+static int afu_remove(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ dfl_fpga_dev_ops_unregister(pdev);
+ dfl_fpga_dev_feature_uinit(pdev);
+ afu_dev_destroy(pdev);
+
+ return 0;
+}
+
+static const struct attribute_group *afu_dev_groups[] = {
+ &port_hdr_group,
+ &port_afu_group,
+ &port_err_group,
+ NULL
+};
+
+static struct platform_driver afu_driver = {
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .dev_groups = afu_dev_groups,
+ },
+ .probe = afu_probe,
+ .remove = afu_remove,
+};
+
+static int __init afu_init(void)
+{
+ int ret;
+
+ dfl_fpga_port_ops_add(&afu_port_ops);
+
+ ret = platform_driver_register(&afu_driver);
+ if (ret)
+ dfl_fpga_port_ops_del(&afu_port_ops);
+
+ return ret;
+}
+
+static void __exit afu_exit(void)
+{
+ platform_driver_unregister(&afu_driver);
+
+ dfl_fpga_port_ops_del(&afu_port_ops);
+}
+
+module_init(afu_init);
+module_exit(afu_exit);
+
+MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-port");
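The per-region offsets handed out by dfl-afu-region.c (the next file) are what userspace passes as the mmap offset; DFL_FPGA_PORT_GET_REGION_INFO reports them. A minimal sketch mapping the AFU MMIO window follows, with the device node path as an assumption; note the port must not be held in reset for the mapping to be usable.

/* Userspace sketch: query the AFU region and mmap it */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	struct dfl_fpga_port_region_info rinfo;
	void *mmio;
	int fd;

	fd = open("/dev/dfl-port.0", O_RDWR);	/* device path is an assumption */
	if (fd < 0)
		return 1;

	memset(&rinfo, 0, sizeof(rinfo));
	rinfo.argsz = sizeof(rinfo);
	rinfo.index = DFL_PORT_REGION_INDEX_AFU;

	if (ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, &rinfo)) {
		close(fd);
		return 1;
	}

	mmio = mmap(NULL, rinfo.size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, rinfo.offset);
	if (mmio == MAP_FAILED) {
		close(fd);
		return 1;
	}

	printf("AFU MMIO: size=0x%llx mmap offset=0x%llx\n",
	       (unsigned long long)rinfo.size,
	       (unsigned long long)rinfo.offset);

	munmap(mmio, rinfo.size);
	close(fd);
	return 0;
}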
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
new file mode 100644
index 0000000000..2e7b416294
--- /dev/null
+++ b/drivers/fpga/dfl-afu-region.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) MMIO Region Management
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+#include "dfl-afu.h"
+
+/**
+ * afu_mmio_region_init - init function for afu mmio region support
+ * @pdata: afu platform device's pdata.
+ */
+void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+
+ INIT_LIST_HEAD(&afu->regions);
+}
+
+#define for_each_region(region, afu) \
+ list_for_each_entry((region), &(afu)->regions, node)
+
+static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
+ u32 region_index)
+{
+ struct dfl_afu_mmio_region *region;
+
+ for_each_region(region, afu)
+ if (region->index == region_index)
+ return region;
+
+ return NULL;
+}
+
+/**
+ * afu_mmio_region_add - add a mmio region to given feature dev.
+ *
+ * @pdata: afu platform device's pdata.
+ * @region_index: region index.
+ * @region_size: region size.
+ * @phys: physical address of this region.
+ * @flags: region flags (access permission).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+ u32 region_index, u64 region_size, u64 phys, u32 flags)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->index = region_index;
+ region->size = region_size;
+ region->phys = phys;
+ region->flags = flags;
+
+ mutex_lock(&pdata->lock);
+
+ afu = dfl_fpga_pdata_get_private(pdata);
+
+	/* check if @region_index already exists */
+ if (get_region_by_index(afu, region_index)) {
+ mutex_unlock(&pdata->lock);
+ ret = -EEXIST;
+ goto exit;
+ }
+
+ region_size = PAGE_ALIGN(region_size);
+ region->offset = afu->region_cur_offset;
+ list_add(&region->node, &afu->regions);
+
+ afu->region_cur_offset += region_size;
+ afu->num_regions++;
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+
+exit:
+ devm_kfree(&pdata->dev->dev, region);
+ return ret;
+}
+
+/**
+ * afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
+ * @pdata: afu platform device's pdata.
+ */
+void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu_mmio_region *tmp, *region;
+
+ list_for_each_entry_safe(region, tmp, &afu->regions, node)
+ devm_kfree(&pdata->dev->dev, region);
+}
+
+/**
+ * afu_mmio_region_get_by_index - find an afu region by index.
+ * @pdata: afu platform device's pdata.
+ * @region_index: region index.
+ * @pregion: ptr to region for result.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+ u32 region_index,
+ struct dfl_afu_mmio_region *pregion)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ region = get_region_by_index(afu, region_index);
+ if (!region) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ *pregion = *region;
+exit:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+/**
+ * afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
+ *
+ * @pdata: afu platform device's pdata.
+ * @offset: region offset from start of the device fd.
+ * @size: region size.
+ * @pregion: ptr to region for result.
+ *
+ * Find the region which fully contains the region described by input
+ * parameters (offset and size) from the feature dev's region linked list.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+ u64 offset, u64 size,
+ struct dfl_afu_mmio_region *pregion)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ for_each_region(region, afu)
+ if (region->offset <= offset &&
+ region->offset + region->size >= offset + size) {
+ *pregion = *region;
+ goto exit;
+ }
+ ret = -EINVAL;
+exit:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
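+
+/*
+ * Example (illustrative): for a region at offset 0x1000 with size
+ * 0x3000, a lookup with offset=0x2000/size=0x1000 matches because
+ * 0x1000 <= 0x2000 and 0x1000 + 0x3000 >= 0x2000 + 0x1000, while
+ * offset=0x3800/size=0x1000 fails as it crosses the region end.
+ */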
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
new file mode 100644
index 0000000000..674e9772f0
--- /dev/null
+++ b/drivers/fpga/dfl-afu.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Accelerated Function Unit (AFU) Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_AFU_H
+#define __DFL_AFU_H
+
+#include <linux/mm.h>
+
+#include "dfl.h"
+
+/**
+ * struct dfl_afu_mmio_region - afu mmio region data structure
+ *
+ * @index: region index.
+ * @flags: region flags (access permission).
+ * @size: region size.
+ * @offset: region offset from start of the device fd.
+ * @phys: region's physical address.
+ * @node: node to add to afu feature dev's region list.
+ */
+struct dfl_afu_mmio_region {
+ u32 index;
+ u32 flags;
+ u64 size;
+ u64 offset;
+ u64 phys;
+ struct list_head node;
+};
+
+/**
+ * struct dfl_afu_dma_region - afu DMA region data structure
+ *
+ * @user_addr: region userspace virtual address.
+ * @length: region length.
+ * @iova: region IO virtual address.
+ * @pages: ptr to pages of this region.
+ * @node: rb tree node.
+ * @in_use: flag to indicate if this region is in_use.
+ */
+struct dfl_afu_dma_region {
+ u64 user_addr;
+ u64 length;
+ u64 iova;
+ struct page **pages;
+ struct rb_node node;
+ bool in_use;
+};
+
+/**
+ * struct dfl_afu - afu device data structure
+ *
+ * @region_cur_offset: current region offset from start of the device fd.
+ * @num_regions: num of mmio regions.
+ * @regions: the mmio region linked list of this afu feature device.
+ * @dma_regions: root of dma regions rb tree.
+ * @num_umsgs: num of umsgs.
+ * @pdata: afu platform device's pdata.
+ */
+struct dfl_afu {
+ u64 region_cur_offset;
+ int num_regions;
+ u8 num_umsgs;
+ struct list_head regions;
+ struct rb_root dma_regions;
+
+ struct dfl_feature_platform_data *pdata;
+};
+
+/* hold pdata->lock when calling __afu_port_enable/disable */
+int __afu_port_enable(struct platform_device *pdev);
+int __afu_port_disable(struct platform_device *pdev);
+
+void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
+int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+ u32 region_index, u64 region_size, u64 phys, u32 flags);
+void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
+int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+ u32 region_index,
+ struct dfl_afu_mmio_region *pregion);
+int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+ u64 offset, u64 size,
+ struct dfl_afu_mmio_region *pregion);
+void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
+void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
+int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+ u64 user_addr, u64 length, u64 *iova);
+int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
+struct dfl_afu_dma_region *
+afu_dma_region_find(struct dfl_feature_platform_data *pdata,
+ u64 iova, u64 size);
+
+extern const struct dfl_feature_ops port_err_ops;
+extern const struct dfl_feature_id port_err_id_table[];
+extern const struct attribute_group port_err_group;
+
+#endif /* __DFL_AFU_H */
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
new file mode 100644
index 0000000000..808d1f4d76
--- /dev/null
+++ b/drivers/fpga/dfl-fme-br.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Bridge Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#include "dfl.h"
+#include "dfl-fme-pr.h"
+
+struct fme_br_priv {
+ struct dfl_fme_br_pdata *pdata;
+ struct dfl_fpga_port_ops *port_ops;
+ struct platform_device *port_pdev;
+};
+
+static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ struct fme_br_priv *priv = bridge->priv;
+ struct platform_device *port_pdev;
+ struct dfl_fpga_port_ops *ops;
+
+ if (!priv->port_pdev) {
+ port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
+ &priv->pdata->port_id,
+ dfl_fpga_check_port_id);
+ if (!port_pdev)
+ return -ENODEV;
+
+ priv->port_pdev = port_pdev;
+ }
+
+ if (priv->port_pdev && !priv->port_ops) {
+ ops = dfl_fpga_port_ops_get(priv->port_pdev);
+ if (!ops || !ops->enable_set)
+ return -ENOENT;
+
+ priv->port_ops = ops;
+ }
+
+ return priv->port_ops->enable_set(priv->port_pdev, enable);
+}
+
+static const struct fpga_bridge_ops fme_bridge_ops = {
+ .enable_set = fme_bridge_enable_set,
+};
+
+static int fme_br_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fme_br_priv *priv;
+ struct fpga_bridge *br;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdata = dev_get_platdata(dev);
+
+ br = fpga_bridge_register(dev, "DFL FPGA FME Bridge",
+ &fme_bridge_ops, priv);
+ if (IS_ERR(br))
+ return PTR_ERR(br);
+
+ platform_set_drvdata(pdev, br);
+
+ return 0;
+}
+
+static int fme_br_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *br = platform_get_drvdata(pdev);
+ struct fme_br_priv *priv = br->priv;
+
+ fpga_bridge_unregister(br);
+
+ if (priv->port_pdev)
+ put_device(&priv->port_pdev->dev);
+ if (priv->port_ops)
+ dfl_fpga_port_ops_put(priv->port_ops);
+
+ return 0;
+}
+
+static struct platform_driver fme_br_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_BRIDGE,
+ },
+ .probe = fme_br_probe,
+ .remove = fme_br_remove,
+};
+
+module_platform_driver(fme_br_driver);
+
+MODULE_DESCRIPTION("FPGA Bridge for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-bridge");
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
new file mode 100644
index 0000000000..51c2892ec0
--- /dev/null
+++ b/drivers/fpga/dfl-fme-error.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine Error Management
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel, Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/fpga-dfl.h>
+#include <linux/uaccess.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+
+#define FME_ERROR_MASK 0x8
+#define FME_ERROR 0x10
+#define MBP_ERROR BIT_ULL(6)
+#define PCIE0_ERROR_MASK 0x18
+#define PCIE0_ERROR 0x20
+#define PCIE1_ERROR_MASK 0x28
+#define PCIE1_ERROR 0x30
+#define FME_FIRST_ERROR 0x38
+#define FME_NEXT_ERROR 0x40
+#define RAS_NONFAT_ERROR_MASK 0x48
+#define RAS_NONFAT_ERROR 0x50
+#define RAS_CATFAT_ERROR_MASK 0x58
+#define RAS_CATFAT_ERROR 0x60
+#define RAS_ERROR_INJECT 0x68
+#define INJECT_ERROR_MASK GENMASK_ULL(2, 0)
+
+#define ERROR_MASK GENMASK_ULL(63, 0)
+
+static ssize_t pcie0_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + PCIE0_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t pcie0_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ int ret = 0;
+ u64 v, val;
+
+ if (kstrtou64(buf, 0, &val))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
+
+ v = readq(base + PCIE0_ERROR);
+ if (val == v)
+ writeq(v, base + PCIE0_ERROR);
+ else
+ ret = -EINVAL;
+
+ writeq(0ULL, base + PCIE0_ERROR_MASK);
+ mutex_unlock(&pdata->lock);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(pcie0_errors);
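+
+/*
+ * Usage sketch (illustrative; the sysfs path is an example): an error
+ * is acknowledged by writing back the value that was just read, e.g.
+ *
+ *   v=$(cat errors/pcie0_errors) && echo $v > errors/pcie0_errors
+ *
+ * The store handler masks reporting around the write-1-to-clear and
+ * returns -EINVAL if the register changed in the meantime, so a newly
+ * latched error is never cleared unseen.
+ */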
+
+static ssize_t pcie1_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + PCIE1_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t pcie1_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ int ret = 0;
+ u64 v, val;
+
+ if (kstrtou64(buf, 0, &val))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
+
+ v = readq(base + PCIE1_ERROR);
+ if (val == v)
+ writeq(v, base + PCIE1_ERROR);
+ else
+ ret = -EINVAL;
+
+ writeq(0ULL, base + PCIE1_ERROR_MASK);
+ mutex_unlock(&pdata->lock);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(pcie1_errors);
+
+static ssize_t nonfatal_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)readq(base + RAS_NONFAT_ERROR));
+}
+static DEVICE_ATTR_RO(nonfatal_errors);
+
+static ssize_t catfatal_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)readq(base + RAS_CATFAT_ERROR));
+}
+static DEVICE_ATTR_RO(catfatal_errors);
+
+static ssize_t inject_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + RAS_ERROR_INJECT);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
+}
+
+static ssize_t inject_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u8 inject_error;
+ u64 v;
+
+ if (kstrtou8(buf, 0, &inject_error))
+ return -EINVAL;
+
+ if (inject_error & ~INJECT_ERROR_MASK)
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + RAS_ERROR_INJECT);
+ v &= ~INJECT_ERROR_MASK;
+ v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
+ writeq(v, base + RAS_ERROR_INJECT);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_errors);
+
+static ssize_t fme_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + FME_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t fme_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v, val;
+ int ret = 0;
+
+ if (kstrtou64(buf, 0, &val))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
+
+ v = readq(base + FME_ERROR);
+ if (val == v)
+ writeq(v, base + FME_ERROR);
+ else
+ ret = -EINVAL;
+
+ /* Workaround: disable MBP_ERROR if feature revision is 0 */
+ writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
+ base + FME_ERROR_MASK);
+ mutex_unlock(&pdata->lock);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(fme_errors);
+
+static ssize_t first_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + FME_FIRST_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+static DEVICE_ATTR_RO(first_error);
+
+static ssize_t next_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + FME_NEXT_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+static DEVICE_ATTR_RO(next_error);
+
+static struct attribute *fme_global_err_attrs[] = {
+ &dev_attr_pcie0_errors.attr,
+ &dev_attr_pcie1_errors.attr,
+ &dev_attr_nonfatal_errors.attr,
+ &dev_attr_catfatal_errors.attr,
+ &dev_attr_inject_errors.attr,
+ &dev_attr_fme_errors.attr,
+ &dev_attr_first_error.attr,
+ &dev_attr_next_error.attr,
+ NULL,
+};
+
+static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+	/*
+	 * sysfs entries are visible only if the related private feature
+	 * is enumerated.
+	 */
+ if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+ return 0;
+
+ return attr->mode;
+}
+
+const struct attribute_group fme_global_err_group = {
+ .name = "errors",
+ .attrs = fme_global_err_attrs,
+ .is_visible = fme_global_err_attrs_visible,
+};
+
+static void fme_err_mask(struct device *dev, bool mask)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+
+ /* Workaround: keep MBP_ERROR always masked if revision is 0 */
+ if (dfl_feature_revision(base))
+ writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
+ else
+ writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);
+
+ writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
+ writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
+ writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
+ writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
+
+ mutex_unlock(&pdata->lock);
+}
+
+static int fme_global_err_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ fme_err_mask(&pdev->dev, false);
+
+ return 0;
+}
+
+static void fme_global_err_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ fme_err_mask(&pdev->dev, true);
+}
+
+static long
+fme_global_error_ioctl(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
+ return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+ case DFL_FPGA_FME_ERR_SET_IRQ:
+ return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+ default:
+		dev_dbg(&pdev->dev, "%x cmd not handled\n", cmd);
+ return -ENODEV;
+ }
+}
+
+const struct dfl_feature_id fme_global_err_id_table[] = {
+ {.id = FME_FEATURE_ID_GLOBAL_ERR,},
+ {0,}
+};
+
+const struct dfl_feature_ops fme_global_err_ops = {
+ .init = fme_global_err_init,
+ .uinit = fme_global_err_uinit,
+ .ioctl = fme_global_error_ioctl,
+};
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
new file mode 100644
index 0000000000..3dcf990bd2
--- /dev/null
+++ b/drivers/fpga/dfl-fme-main.c
@@ -0,0 +1,762 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/units.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+
+static ssize_t ports_num_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
+}
+static DEVICE_ATTR_RO(ports_num);
+
+/*
+ * Bitstream (static FPGA region) identifier number. It contains the
+ * detailed version and other information of this static FPGA region.
+ */
+static ssize_t bitstream_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_BITSTREAM_ID);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
+}
+static DEVICE_ATTR_RO(bitstream_id);
+
+/*
+ * Bitstream (static FPGA region) meta data. It contains the synthesis
+ * date, seed and other information of this static FPGA region.
+ */
+static ssize_t bitstream_metadata_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_BITSTREAM_MD);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
+}
+static DEVICE_ATTR_RO(bitstream_metadata);
+
+static ssize_t cache_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
+}
+static DEVICE_ATTR_RO(cache_size);
+
+static ssize_t fabric_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
+}
+static DEVICE_ATTR_RO(fabric_version);
+
+static ssize_t socket_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
+}
+static DEVICE_ATTR_RO(socket_id);
+
+static struct attribute *fme_hdr_attrs[] = {
+ &dev_attr_ports_num.attr,
+ &dev_attr_bitstream_id.attr,
+ &dev_attr_bitstream_metadata.attr,
+ &dev_attr_cache_size.attr,
+ &dev_attr_fabric_version.attr,
+ &dev_attr_socket_id.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_hdr_group = {
+ .attrs = fme_hdr_attrs,
+};
+
+static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ int port_id;
+
+ if (get_user(port_id, (int __user *)arg))
+ return -EFAULT;
+
+ return dfl_fpga_cdev_release_port(cdev, port_id);
+}
+
+static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ int port_id;
+
+ if (get_user(port_id, (int __user *)arg))
+ return -EFAULT;
+
+ return dfl_fpga_cdev_assign_port(cdev, port_id);
+}
+
+static long fme_hdr_ioctl(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ switch (cmd) {
+ case DFL_FPGA_FME_PORT_RELEASE:
+ return fme_hdr_ioctl_release_port(pdata, arg);
+ case DFL_FPGA_FME_PORT_ASSIGN:
+ return fme_hdr_ioctl_assign_port(pdata, arg);
+ }
+
+ return -ENODEV;
+}
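+
+/*
+ * Userspace sketch (illustrative; the device path is an example) for
+ * turning a port over to a VF via the FME device node:
+ *
+ *   int port_id = 0;
+ *   int fd = open("/dev/dfl-fme.0", O_RDWR);
+ *
+ *   ioctl(fd, DFL_FPGA_FME_PORT_RELEASE, &port_id);
+ *   // ... bind the port to a VF and use it ...
+ *   ioctl(fd, DFL_FPGA_FME_PORT_ASSIGN, &port_id);
+ */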
+
+static const struct dfl_feature_id fme_hdr_id_table[] = {
+ {.id = FME_FEATURE_ID_HEADER,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_hdr_ops = {
+ .ioctl = fme_hdr_ioctl,
+};
+
+#define FME_THERM_THRESHOLD 0x8
+#define TEMP_THRESHOLD1 GENMASK_ULL(6, 0)
+#define TEMP_THRESHOLD1_EN BIT_ULL(7)
+#define TEMP_THRESHOLD2 GENMASK_ULL(14, 8)
+#define TEMP_THRESHOLD2_EN BIT_ULL(15)
+#define TRIP_THRESHOLD GENMASK_ULL(30, 24)
+#define TEMP_THRESHOLD1_STATUS BIT_ULL(32) /* threshold1 reached */
+#define TEMP_THRESHOLD2_STATUS BIT_ULL(33) /* threshold2 reached */
+/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
+#define TEMP_THRESHOLD1_POLICY BIT_ULL(44)
+
+#define FME_THERM_RDSENSOR_FMT1 0x10
+#define FPGA_TEMPERATURE GENMASK_ULL(6, 0)
+
+#define FME_THERM_CAP 0x20
+#define THERM_NO_THROTTLE BIT_ULL(0)
+
+#define MD_PRE_DEG
+
+static bool fme_thermal_throttle_support(void __iomem *base)
+{
+ u64 v = readq(base + FME_THERM_CAP);
+
+ return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
+}
+
+static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct dfl_feature *feature = drvdata;
+
+	/* temperature is always supported; check the hardware cap for others */
+ if (attr == hwmon_temp_input)
+ return 0444;
+
+ return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
+}
+
+static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
+ *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * MILLI);
+ break;
+ case hwmon_temp_max:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * MILLI);
+ break;
+ case hwmon_temp_crit:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * MILLI);
+ break;
+ case hwmon_temp_emergency:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * MILLI);
+ break;
+ case hwmon_temp_max_alarm:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
+ break;
+ case hwmon_temp_crit_alarm:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops thermal_hwmon_ops = {
+ .is_visible = thermal_hwmon_attrs_visible,
+ .read = thermal_hwmon_read,
+};
+
+static const struct hwmon_channel_info * const thermal_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT | HWMON_T_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info thermal_hwmon_chip_info = {
+ .ops = &thermal_hwmon_ops,
+ .info = thermal_hwmon_info,
+};
+
+static ssize_t temp1_max_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
+}
+
+static DEVICE_ATTR_RO(temp1_max_policy);
+
+static struct attribute *thermal_extra_attrs[] = {
+ &dev_attr_temp1_max_policy.attr,
+ NULL,
+};
+
+static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+
+ return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
+}
+
+static const struct attribute_group thermal_extra_group = {
+ .attrs = thermal_extra_attrs,
+ .is_visible = thermal_extra_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(thermal_extra);
+
+static int fme_thermal_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct device *hwmon;
+
+	/*
+	 * create hwmon to allow userspace to monitor temperature and other
+	 * threshold information.
+	 *
+	 * temp1_input -> FPGA device temperature
+	 * temp1_max -> hardware threshold 1 -> 50% or 90% throttling
+	 * temp1_crit -> hardware threshold 2 -> 100% throttling
+	 * temp1_emergency -> hardware trip_threshold to shutdown FPGA
+	 * temp1_max_alarm -> hardware threshold 1 alarm
+	 * temp1_crit_alarm -> hardware threshold 2 alarm
+	 *
+	 * create device-specific sysfs interfaces, e.g. read temp1_max_policy
+	 * to understand the actual hardware throttling action (50% vs 90%).
+	 *
+	 * If the hardware doesn't support automatic throttling per threshold,
+	 * none of the above sysfs interfaces are visible except temp1_input
+	 * for temperature.
+	 */
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+ "dfl_fme_thermal", feature,
+ &thermal_hwmon_chip_info,
+ thermal_extra_groups);
+ if (IS_ERR(hwmon)) {
+		dev_err(&pdev->dev, "Failed to register thermal hwmon\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
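+
+/*
+ * Example (illustrative): a raw FPGA_TEMPERATURE reading of 85 shows
+ * up as temp1_input = 85000, since hwmon reports millidegrees Celsius
+ * and the MILLI factor above converts from whole degrees.
+ */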
+
+static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_THERMAL_MGMT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
+ .init = fme_thermal_mgmt_init,
+};
+
+#define FME_PWR_STATUS 0x8
+#define FME_LATENCY_TOLERANCE BIT_ULL(18)
+#define PWR_CONSUMED GENMASK_ULL(17, 0)
+
+#define FME_PWR_THRESHOLD 0x10
+#define PWR_THRESHOLD1 GENMASK_ULL(6, 0) /* in Watts */
+#define PWR_THRESHOLD2 GENMASK_ULL(14, 8) /* in Watts */
+#define PWR_THRESHOLD_MAX 0x7f /* in Watts */
+#define PWR_THRESHOLD1_STATUS BIT_ULL(16)
+#define PWR_THRESHOLD2_STATUS BIT_ULL(17)
+
+#define FME_PWR_XEON_LIMIT 0x18
+#define XEON_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */
+#define XEON_PWR_EN BIT_ULL(15)
+#define FME_PWR_FPGA_LIMIT 0x20
+#define FPGA_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */
+#define FPGA_PWR_EN BIT_ULL(15)
+
+static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ switch (attr) {
+ case hwmon_power_input:
+ v = readq(feature->ioaddr + FME_PWR_STATUS);
+ *val = (long)(FIELD_GET(PWR_CONSUMED, v) * MICRO);
+ break;
+ case hwmon_power_max:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * MICRO);
+ break;
+ case hwmon_power_crit:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * MICRO);
+ break;
+ case hwmon_power_max_alarm:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
+ break;
+ case hwmon_power_crit_alarm:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ int ret = 0;
+ u64 v;
+
+ val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);
+
+ mutex_lock(&pdata->lock);
+
+ switch (attr) {
+ case hwmon_power_max:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ v &= ~PWR_THRESHOLD1;
+ v |= FIELD_PREP(PWR_THRESHOLD1, val);
+ writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+ break;
+ case hwmon_power_crit:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ v &= ~PWR_THRESHOLD2;
+ v |= FIELD_PREP(PWR_THRESHOLD2, val);
+ writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
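+
+/*
+ * Example (illustrative): hwmon power attributes are in microwatts,
+ * so writing 45000000 to power1_max programs PWR_THRESHOLD1 to 45 W;
+ * values above 127000000 are clamped to PWR_THRESHOLD_MAX (0x7f).
+ */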
+
+static umode_t power_hwmon_attrs_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_max_alarm:
+ case hwmon_power_crit_alarm:
+ return 0444;
+ case hwmon_power_max:
+ case hwmon_power_crit:
+ return 0644;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops power_hwmon_ops = {
+ .is_visible = power_hwmon_attrs_visible,
+ .read = power_hwmon_read,
+ .write = power_hwmon_write,
+};
+
+static const struct hwmon_channel_info * const power_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MAX_ALARM |
+ HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info power_hwmon_chip_info = {
+ .ops = &power_hwmon_ops,
+ .info = power_hwmon_info,
+};
+
+static ssize_t power1_xeon_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u16 xeon_limit = 0;
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);
+
+ if (FIELD_GET(XEON_PWR_EN, v))
+ xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
+
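+	/* limit unit is 0.1 W; report microwatts (0.1 W = 100000 uW) */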
+ return sprintf(buf, "%u\n", xeon_limit * 100000);
+}
+
+static ssize_t power1_fpga_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u16 fpga_limit = 0;
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);
+
+ if (FIELD_GET(FPGA_PWR_EN, v))
+ fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
+
+ return sprintf(buf, "%u\n", fpga_limit * 100000);
+}
+
+static ssize_t power1_ltr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_STATUS);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
+}
+
+static DEVICE_ATTR_RO(power1_xeon_limit);
+static DEVICE_ATTR_RO(power1_fpga_limit);
+static DEVICE_ATTR_RO(power1_ltr);
+
+static struct attribute *power_extra_attrs[] = {
+ &dev_attr_power1_xeon_limit.attr,
+ &dev_attr_power1_fpga_limit.attr,
+ &dev_attr_power1_ltr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(power_extra);
+
+static int fme_power_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct device *hwmon;
+
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+ "dfl_fme_power", feature,
+ &power_hwmon_chip_info,
+ power_extra_groups);
+ if (IS_ERR(hwmon)) {
+		dev_err(&pdev->dev, "Failed to register power hwmon\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_POWER_MGMT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_power_mgmt_ops = {
+ .init = fme_power_mgmt_init,
+};
+
+static struct dfl_feature_driver fme_feature_drvs[] = {
+ {
+ .id_table = fme_hdr_id_table,
+ .ops = &fme_hdr_ops,
+ },
+ {
+ .id_table = fme_pr_mgmt_id_table,
+ .ops = &fme_pr_mgmt_ops,
+ },
+ {
+ .id_table = fme_global_err_id_table,
+ .ops = &fme_global_err_ops,
+ },
+ {
+ .id_table = fme_thermal_mgmt_id_table,
+ .ops = &fme_thermal_mgmt_ops,
+ },
+ {
+ .id_table = fme_power_mgmt_id_table,
+ .ops = &fme_power_mgmt_ops,
+ },
+ {
+ .id_table = fme_perf_id_table,
+ .ops = &fme_perf_ops,
+ },
+ {
+ .ops = NULL,
+ },
+};
+
+static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ /* No extension support for now */
+ return 0;
+}
+
+static int fme_open(struct inode *inode, struct file *filp)
+{
+ struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ int ret;
+
+ if (WARN_ON(!pdata))
+ return -ENODEV;
+
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ if (!ret) {
+ dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+ dfl_feature_dev_use_count(pdata));
+ filp->private_data = pdata;
+ }
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static int fme_release(struct inode *inode, struct file *filp)
+{
+ struct dfl_feature_platform_data *pdata = filp->private_data;
+ struct platform_device *pdev = pdata->dev;
+ struct dfl_feature *feature;
+
+ dev_dbg(&pdev->dev, "Device File Release\n");
+
+ mutex_lock(&pdata->lock);
+ dfl_feature_dev_use_end(pdata);
+
+ if (!dfl_feature_dev_use_count(pdata))
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_set_irq_triggers(feature, 0,
+ feature->nr_irqs, NULL);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = filp->private_data;
+ struct platform_device *pdev = pdata->dev;
+ struct dfl_feature *f;
+ long ret;
+
+ dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
+
+ switch (cmd) {
+ case DFL_FPGA_GET_API_VERSION:
+ return DFL_FPGA_API_VERSION;
+ case DFL_FPGA_CHECK_EXTENSION:
+ return fme_ioctl_check_extension(pdata, arg);
+ default:
+		/*
+		 * Let the sub-feature's ioctl function handle the cmd.
+		 * A sub-feature's ioctl returns -ENODEV when the cmd is
+		 * not handled by this sub-feature, and returns 0 or
+		 * another error code if the cmd is handled.
+		 */
+ dfl_fpga_dev_for_each_feature(pdata, f) {
+ if (f->ops && f->ops->ioctl) {
+ ret = f->ops->ioctl(pdev, f, cmd, arg);
+ if (ret != -ENODEV)
+ return ret;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
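+
+/*
+ * Dispatch example (illustrative): DFL_FPGA_FME_PORT_RELEASE is not
+ * handled by the switch above, so it is offered to each sub-feature
+ * in turn; the FME header feature's fme_hdr_ioctl() recognizes it and
+ * its return value goes straight back to userspace.
+ */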
+
+static int fme_dev_init(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme *fme;
+
+ fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
+ if (!fme)
+ return -ENOMEM;
+
+ fme->pdata = pdata;
+
+ mutex_lock(&pdata->lock);
+ dfl_fpga_pdata_set_private(pdata, fme);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static void fme_dev_destroy(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ dfl_fpga_pdata_set_private(pdata, NULL);
+ mutex_unlock(&pdata->lock);
+}
+
+static const struct file_operations fme_fops = {
+ .owner = THIS_MODULE,
+ .open = fme_open,
+ .release = fme_release,
+ .unlocked_ioctl = fme_ioctl,
+};
+
+static int fme_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = fme_dev_init(pdev);
+ if (ret)
+ goto exit;
+
+ ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
+ if (ret)
+ goto dev_destroy;
+
+ ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
+ if (ret)
+ goto feature_uinit;
+
+ return 0;
+
+feature_uinit:
+ dfl_fpga_dev_feature_uinit(pdev);
+dev_destroy:
+ fme_dev_destroy(pdev);
+exit:
+ return ret;
+}
+
+static int fme_remove(struct platform_device *pdev)
+{
+ dfl_fpga_dev_ops_unregister(pdev);
+ dfl_fpga_dev_feature_uinit(pdev);
+ fme_dev_destroy(pdev);
+
+ return 0;
+}
+
+static const struct attribute_group *fme_dev_groups[] = {
+ &fme_hdr_group,
+ &fme_global_err_group,
+ NULL
+};
+
+static struct platform_driver fme_driver = {
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_FME,
+ .dev_groups = fme_dev_groups,
+ },
+ .probe = fme_probe,
+ .remove = fme_remove,
+};
+
+module_platform_driver(fme_driver);
+
+MODULE_DESCRIPTION("FPGA Management Engine driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme");
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
new file mode 100644
index 0000000000..ab228d8837
--- /dev/null
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Manager Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Christopher Rauer <christopher.rauer@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/fpga/fpga-mgr.h>
+
+#include "dfl-fme-pr.h"
+
+/* FME Partial Reconfiguration Sub Feature Register Set */
+#define FME_PR_DFH 0x0
+#define FME_PR_CTRL 0x8
+#define FME_PR_STS 0x10
+#define FME_PR_DATA 0x18
+#define FME_PR_ERR 0x20
+#define FME_PR_INTFC_ID_L 0xA8
+#define FME_PR_INTFC_ID_H 0xB0
+
+/* FME PR Control Register Bitfield */
+#define FME_PR_CTRL_PR_RST BIT_ULL(0) /* Reset PR engine */
+#define FME_PR_CTRL_PR_RSTACK BIT_ULL(4) /* Ack for PR engine reset */
+#define FME_PR_CTRL_PR_RGN_ID GENMASK_ULL(9, 7) /* PR Region ID */
+#define FME_PR_CTRL_PR_START BIT_ULL(12) /* Start to request PR service */
+#define FME_PR_CTRL_PR_COMPLETE BIT_ULL(13) /* PR data push completion */
+
+/* FME PR Status Register Bitfield */
+/* Number of available entries in HW queue inside the PR engine. */
+#define FME_PR_STS_PR_CREDIT GENMASK_ULL(8, 0)
+#define FME_PR_STS_PR_STS BIT_ULL(16) /* PR operation status */
+#define FME_PR_STS_PR_STS_IDLE 0
+#define FME_PR_STS_PR_CTRLR_STS GENMASK_ULL(22, 20) /* Controller status */
+#define FME_PR_STS_PR_HOST_STS GENMASK_ULL(27, 24) /* PR host status */
+
+/* FME PR Data Register Bitfield */
+/* PR data from the raw-binary file. */
+#define FME_PR_DATA_PR_DATA_RAW GENMASK_ULL(32, 0)
+
+/* FME PR Error Register */
+/* PR Operation errors detected. */
+#define FME_PR_ERR_OPERATION_ERR BIT_ULL(0)
+/* CRC error detected. */
+#define FME_PR_ERR_CRC_ERR BIT_ULL(1)
+/* Incompatible PR bitstream detected. */
+#define FME_PR_ERR_INCOMPATIBLE_BS BIT_ULL(2)
+/* PR data push protocol violated. */
+#define FME_PR_ERR_PROTOCOL_ERR BIT_ULL(3)
+/* PR data fifo overflow error detected */
+#define FME_PR_ERR_FIFO_OVERFLOW BIT_ULL(4)
+
+#define PR_WAIT_TIMEOUT 8000000
+#define PR_HOST_STATUS_IDLE 0
+
+struct fme_mgr_priv {
+ void __iomem *ioaddr;
+ u64 pr_error;
+};
+
+static u64 pr_error_to_mgr_status(u64 err)
+{
+ u64 status = 0;
+
+ if (err & FME_PR_ERR_OPERATION_ERR)
+ status |= FPGA_MGR_STATUS_OPERATION_ERR;
+ if (err & FME_PR_ERR_CRC_ERR)
+ status |= FPGA_MGR_STATUS_CRC_ERR;
+ if (err & FME_PR_ERR_INCOMPATIBLE_BS)
+ status |= FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR;
+ if (err & FME_PR_ERR_PROTOCOL_ERR)
+ status |= FPGA_MGR_STATUS_IP_PROTOCOL_ERR;
+ if (err & FME_PR_ERR_FIFO_OVERFLOW)
+ status |= FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR;
+
+ return status;
+}
+
+static u64 fme_mgr_pr_error_handle(void __iomem *fme_pr)
+{
+ u64 pr_status, pr_error;
+
+ pr_status = readq(fme_pr + FME_PR_STS);
+ if (!(pr_status & FME_PR_STS_PR_STS))
+ return 0;
+
+ pr_error = readq(fme_pr + FME_PR_ERR);
+ writeq(pr_error, fme_pr + FME_PR_ERR);
+
+ return pr_error;
+}
+
+static int fme_mgr_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl, pr_status;
+
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ dev_err(dev, "only supports partial reconfiguration.\n");
+ return -EINVAL;
+ }
+
+	dev_dbg(dev, "resetting PR before initiating PR\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_RST;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ if (readq_poll_timeout(fme_pr + FME_PR_CTRL, pr_ctrl,
+ pr_ctrl & FME_PR_CTRL_PR_RSTACK, 1,
+ PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Reset ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl &= ~FME_PR_CTRL_PR_RST;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev,
+ "waiting for PR resource in HW to be initialized and ready\n");
+
+ if (readq_poll_timeout(fme_pr + FME_PR_STS, pr_status,
+ (pr_status & FME_PR_STS_PR_STS) ==
+ FME_PR_STS_PR_STS_IDLE, 1, PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Status timeout\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "check and clear previous PR error\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ if (priv->pr_error)
+ dev_dbg(dev, "previous PR error detected %llx\n",
+ (unsigned long long)priv->pr_error);
+
+ dev_dbg(dev, "set PR port ID\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl &= ~FME_PR_CTRL_PR_RGN_ID;
+ pr_ctrl |= FIELD_PREP(FME_PR_CTRL_PR_RGN_ID, info->region_id);
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ return 0;
+}
+
+static int fme_mgr_write(struct fpga_manager *mgr,
+ const char *buf, size_t count)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl, pr_status, pr_data;
+ int delay = 0, pr_credit, i = 0;
+
+ dev_dbg(dev, "start request\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_START;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev, "pushing data from bitstream to HW\n");
+
+	/*
+	 * The driver can push data to the PR hardware through the PR_DATA
+	 * register once the HW has enough pr_credit (> 1); pr_credit is
+	 * reduced by one for every 32-bit PR data write to the PR_DATA
+	 * register. If pr_credit <= 1, the driver must poll until the
+	 * hardware reports enough pr_credit again.
+	 */
+ pr_status = readq(fme_pr + FME_PR_STS);
+ pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status);
+
+ while (count > 0) {
+ while (pr_credit <= 1) {
+ if (delay++ > PR_WAIT_TIMEOUT) {
+ dev_err(dev, "PR_CREDIT timeout\n");
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+
+ pr_status = readq(fme_pr + FME_PR_STS);
+ pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status);
+ }
+
+ if (count < 4) {
+ dev_err(dev, "Invalid PR bitstream size\n");
+ return -EINVAL;
+ }
+
+ pr_data = 0;
+ pr_data |= FIELD_PREP(FME_PR_DATA_PR_DATA_RAW,
+ *(((u32 *)buf) + i));
+ writeq(pr_data, fme_pr + FME_PR_DATA);
+ count -= 4;
+ pr_credit--;
+ i++;
+ }
+
+ return 0;
+}
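+
+/*
+ * Worked example (illustrative): assuming each poll of PR_STS returns
+ * a full credit of 256, the loop streams 255 32-bit words into PR_DATA
+ * before pr_credit drops to 1, so pushing a 4 KiB chunk (1024 words)
+ * takes ceil(1024 / 255) = 5 such rounds.
+ */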
+
+static int fme_mgr_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl;
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_COMPLETE;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev, "green bitstream push complete\n");
+ dev_dbg(dev, "waiting for HW to release PR resource\n");
+
+ if (readq_poll_timeout(fme_pr + FME_PR_CTRL, pr_ctrl,
+ !(pr_ctrl & FME_PR_CTRL_PR_START), 1,
+ PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Completion ACK timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "PR operation complete, checking status\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ if (priv->pr_error) {
+ dev_dbg(dev, "PR error detected %llx\n",
+ (unsigned long long)priv->pr_error);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "PR done successfully\n");
+
+ return 0;
+}
+
+static u64 fme_mgr_status(struct fpga_manager *mgr)
+{
+ struct fme_mgr_priv *priv = mgr->priv;
+
+ return pr_error_to_mgr_status(priv->pr_error);
+}
+
+static const struct fpga_manager_ops fme_mgr_ops = {
+ .write_init = fme_mgr_write_init,
+ .write = fme_mgr_write,
+ .write_complete = fme_mgr_write_complete,
+ .status = fme_mgr_status,
+};
+
+static void fme_mgr_get_compat_id(void __iomem *fme_pr,
+ struct fpga_compat_id *id)
+{
+ id->id_l = readq(fme_pr + FME_PR_INTFC_ID_L);
+ id->id_h = readq(fme_pr + FME_PR_INTFC_ID_H);
+}
+
+static int fme_mgr_probe(struct platform_device *pdev)
+{
+ struct dfl_fme_mgr_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct fpga_manager_info info = { 0 };
+ struct device *dev = &pdev->dev;
+ struct fme_mgr_priv *priv;
+ struct fpga_manager *mgr;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (pdata->ioaddr)
+ priv->ioaddr = pdata->ioaddr;
+
+ if (!priv->ioaddr) {
+ priv->ioaddr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->ioaddr))
+ return PTR_ERR(priv->ioaddr);
+ }
+
+ info.name = "DFL FME FPGA Manager";
+ info.mops = &fme_mgr_ops;
+ info.priv = priv;
+ info.compat_id = devm_kzalloc(dev, sizeof(*info.compat_id), GFP_KERNEL);
+ if (!info.compat_id)
+ return -ENOMEM;
+
+ fme_mgr_get_compat_id(priv->ioaddr, info.compat_id);
+ mgr = devm_fpga_mgr_register_full(dev, &info);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+static struct platform_driver fme_mgr_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_MGR,
+ },
+ .probe = fme_mgr_probe,
+};
+
+module_platform_driver(fme_mgr_driver);
+
+MODULE_DESCRIPTION("FPGA Manager for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-mgr");
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
new file mode 100644
index 0000000000..7422d2bc6f
--- /dev/null
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -0,0 +1,1022 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME) Global Performance Reporting
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xu Yilun <yilun.xu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel, Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/perf_event.h>
+#include "dfl.h"
+#include "dfl-fme.h"
+
+/*
+ * Performance Counter Registers for Cache.
+ *
+ * Cache Events are listed below as CACHE_EVNT_*.
+ */
+#define CACHE_CTRL 0x8
+#define CACHE_RESET_CNTR BIT_ULL(0)
+#define CACHE_FREEZE_CNTR BIT_ULL(8)
+#define CACHE_CTRL_EVNT GENMASK_ULL(19, 16)
+#define CACHE_EVNT_RD_HIT 0x0
+#define CACHE_EVNT_WR_HIT 0x1
+#define CACHE_EVNT_RD_MISS 0x2
+#define CACHE_EVNT_WR_MISS 0x3
+#define CACHE_EVNT_RSVD 0x4
+#define CACHE_EVNT_HOLD_REQ 0x5
+#define CACHE_EVNT_DATA_WR_PORT_CONTEN 0x6
+#define CACHE_EVNT_TAG_WR_PORT_CONTEN 0x7
+#define CACHE_EVNT_TX_REQ_STALL 0x8
+#define CACHE_EVNT_RX_REQ_STALL 0x9
+#define CACHE_EVNT_EVICTIONS 0xa
+#define CACHE_EVNT_MAX CACHE_EVNT_EVICTIONS
+#define CACHE_CHANNEL_SEL BIT_ULL(20)
+#define CACHE_CHANNEL_RD 0
+#define CACHE_CHANNEL_WR 1
+#define CACHE_CNTR0 0x10
+#define CACHE_CNTR1 0x18
+#define CACHE_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define CACHE_CNTR_EVNT GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Fabric.
+ *
+ * Fabric Events are listed below as FAB_EVNT_*
+ */
+#define FAB_CTRL 0x20
+#define FAB_RESET_CNTR BIT_ULL(0)
+#define FAB_FREEZE_CNTR BIT_ULL(8)
+#define FAB_CTRL_EVNT GENMASK_ULL(19, 16)
+#define FAB_EVNT_PCIE0_RD 0x0
+#define FAB_EVNT_PCIE0_WR 0x1
+#define FAB_EVNT_PCIE1_RD 0x2
+#define FAB_EVNT_PCIE1_WR 0x3
+#define FAB_EVNT_UPI_RD 0x4
+#define FAB_EVNT_UPI_WR 0x5
+#define FAB_EVNT_MMIO_RD 0x6
+#define FAB_EVNT_MMIO_WR 0x7
+#define FAB_EVNT_MAX FAB_EVNT_MMIO_WR
+#define FAB_PORT_ID GENMASK_ULL(21, 20)
+#define FAB_PORT_FILTER BIT_ULL(23)
+#define FAB_PORT_FILTER_DISABLE 0
+#define FAB_PORT_FILTER_ENABLE 1
+#define FAB_CNTR 0x28
+#define FAB_CNTR_EVNT_CNTR GENMASK_ULL(59, 0)
+#define FAB_CNTR_EVNT GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Clock.
+ *
+ * Clock Counter can't be reset or frozen by SW.
+ */
+#define CLK_CNTR 0x30
+#define BASIC_EVNT_CLK 0x0
+#define BASIC_EVNT_MAX BASIC_EVNT_CLK
+
+/*
+ * Performance Counter Registers for IOMMU / VT-D.
+ *
+ * VT-D Events are listed below as VTD_EVNT_* and VTD_SIP_EVNT_*
+ */
+#define VTD_CTRL 0x38
+#define VTD_RESET_CNTR BIT_ULL(0)
+#define VTD_FREEZE_CNTR BIT_ULL(8)
+#define VTD_CTRL_EVNT GENMASK_ULL(19, 16)
+#define VTD_EVNT_AFU_MEM_RD_TRANS 0x0
+#define VTD_EVNT_AFU_MEM_WR_TRANS 0x1
+#define VTD_EVNT_AFU_DEVTLB_RD_HIT 0x2
+#define VTD_EVNT_AFU_DEVTLB_WR_HIT 0x3
+#define VTD_EVNT_DEVTLB_4K_FILL 0x4
+#define VTD_EVNT_DEVTLB_2M_FILL 0x5
+#define VTD_EVNT_DEVTLB_1G_FILL 0x6
+#define VTD_EVNT_MAX VTD_EVNT_DEVTLB_1G_FILL
+#define VTD_CNTR 0x40
+#define VTD_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define VTD_CNTR_EVNT GENMASK_ULL(63, 60)
+
+#define VTD_SIP_CTRL 0x48
+#define VTD_SIP_RESET_CNTR BIT_ULL(0)
+#define VTD_SIP_FREEZE_CNTR BIT_ULL(8)
+#define VTD_SIP_CTRL_EVNT GENMASK_ULL(19, 16)
+#define VTD_SIP_EVNT_IOTLB_4K_HIT 0x0
+#define VTD_SIP_EVNT_IOTLB_2M_HIT 0x1
+#define VTD_SIP_EVNT_IOTLB_1G_HIT 0x2
+#define VTD_SIP_EVNT_SLPWC_L3_HIT 0x3
+#define VTD_SIP_EVNT_SLPWC_L4_HIT 0x4
+#define VTD_SIP_EVNT_RCC_HIT 0x5
+#define VTD_SIP_EVNT_IOTLB_4K_MISS 0x6
+#define VTD_SIP_EVNT_IOTLB_2M_MISS 0x7
+#define VTD_SIP_EVNT_IOTLB_1G_MISS 0x8
+#define VTD_SIP_EVNT_SLPWC_L3_MISS 0x9
+#define VTD_SIP_EVNT_SLPWC_L4_MISS 0xa
+#define VTD_SIP_EVNT_RCC_MISS 0xb
+#define VTD_SIP_EVNT_MAX VTD_SIP_EVNT_SLPWC_L4_MISS
+#define VTD_SIP_CNTR			0x50
+#define VTD_SIP_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define VTD_SIP_CNTR_EVNT GENMASK_ULL(63, 60)
+
+#define PERF_TIMEOUT 30
+
+#define PERF_MAX_PORT_NUM 1U
+
+/**
+ * struct fme_perf_priv - priv data structure for fme perf driver
+ *
+ * @dev: parent device.
+ * @ioaddr: mapped base address of mmio region.
+ * @pmu: pmu data structure for fme perf counters.
+ * @id: id of this fme performance report private feature.
+ * @fab_users: current user number on fabric counters.
+ * @fab_port_id: used to indicate current working mode of fabric counters.
+ * @fab_lock: lock to protect fabric counters working mode.
+ * @cpu: active CPU to which the PMU is bound for accesses.
+ * @node: node for CPU hotplug notifier link.
+ * @cpuhp_state: state for CPU hotplug notification.
+ */
+struct fme_perf_priv {
+ struct device *dev;
+ void __iomem *ioaddr;
+ struct pmu pmu;
+ u16 id;
+
+ u32 fab_users;
+ u32 fab_port_id;
+ spinlock_t fab_lock;
+
+ unsigned int cpu;
+ struct hlist_node node;
+ enum cpuhp_state cpuhp_state;
+};
+
+/**
+ * struct fme_perf_event_ops - callbacks for fme perf events
+ *
+ * @event_init: callback invoked during event init.
+ * @event_destroy: callback invoked during event destroy.
+ * @read_counter: callback to read hardware counters.
+ */
+struct fme_perf_event_ops {
+ int (*event_init)(struct fme_perf_priv *priv, u32 event, u32 portid);
+ void (*event_destroy)(struct fme_perf_priv *priv, u32 event,
+ u32 portid);
+ u64 (*read_counter)(struct fme_perf_priv *priv, u32 event, u32 portid);
+};
+
+#define to_fme_perf_priv(_pmu) container_of(_pmu, struct fme_perf_priv, pmu)
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct fme_perf_priv *priv;
+
+ priv = to_fme_perf_priv(pmu);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(priv->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *fme_perf_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_cpumask_group = {
+ .attrs = fme_perf_cpumask_attrs,
+};
+
+#define FME_EVENT_MASK GENMASK_ULL(11, 0)
+#define FME_EVENT_SHIFT 0
+#define FME_EVTYPE_MASK GENMASK_ULL(15, 12)
+#define FME_EVTYPE_SHIFT 12
+#define FME_EVTYPE_BASIC 0
+#define FME_EVTYPE_CACHE 1
+#define FME_EVTYPE_FABRIC 2
+#define FME_EVTYPE_VTD 3
+#define FME_EVTYPE_VTD_SIP 4
+#define FME_EVTYPE_MAX FME_EVTYPE_VTD_SIP
+#define FME_PORTID_MASK GENMASK_ULL(23, 16)
+#define FME_PORTID_SHIFT 16
+#define FME_PORTID_ROOT (0xffU)
+
+#define get_event(_config) FIELD_GET(FME_EVENT_MASK, _config)
+#define get_evtype(_config) FIELD_GET(FME_EVTYPE_MASK, _config)
+#define get_portid(_config) FIELD_GET(FME_PORTID_MASK, _config)
+
+PMU_FORMAT_ATTR(event, "config:0-11");
+PMU_FORMAT_ATTR(evtype, "config:12-15");
+PMU_FORMAT_ATTR(portid, "config:16-23");
+
+static struct attribute *fme_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_evtype.attr,
+ &format_attr_portid.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_format_group = {
+ .name = "format",
+ .attrs = fme_perf_format_attrs,
+};
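+
+/*
+ * Encoding example (illustrative): counting fabric mmio reads for the
+ * whole device uses event=0x6 (FAB_EVNT_MMIO_RD), evtype=0x2
+ * (FME_EVTYPE_FABRIC) and portid=0xff (FME_PORTID_ROOT), e.g.
+ *
+ *   perf stat -e dfl_fme0/event=0x06,evtype=0x02,portid=0xff/
+ *
+ * where the PMU instance name ("dfl_fme0") depends on enumeration.
+ */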
+
+/*
+ * There are no default events, but we need to create
+ * "events" group (with empty attrs) before updating
+ * it with detected events (using pmu->attr_update).
+ */
+static struct attribute *fme_perf_events_attrs_empty[] = {
+ NULL,
+};
+
+static const struct attribute_group fme_perf_events_group = {
+ .name = "events",
+ .attrs = fme_perf_events_attrs_empty,
+};
+
+static const struct attribute_group *fme_perf_groups[] = {
+ &fme_perf_format_group,
+ &fme_perf_cpumask_group,
+ &fme_perf_events_group,
+ NULL,
+};
+
+static bool is_portid_root(u32 portid)
+{
+ return portid == FME_PORTID_ROOT;
+}
+
+static bool is_portid_port(u32 portid)
+{
+ return portid < PERF_MAX_PORT_NUM;
+}
+
+static bool is_portid_root_or_port(u32 portid)
+{
+ return is_portid_root(portid) || is_portid_port(portid);
+}
+
+static u64 fme_read_perf_cntr_reg(void __iomem *addr)
+{
+ u32 low;
+ u64 v;
+
+	/*
+	 * For 64-bit counter registers, the counter may increase and carry
+	 * out of bit [31] between two 32-bit reads, so add an extra read to
+	 * detect and retry on this case. It only happens on platforms that
+	 * don't support 64-bit reads, where readq is split into two readl
+	 * calls.
+	 */
+ do {
+ v = readq(addr);
+ low = readl(addr);
+ } while (((u32)v) > low);
+
+ return v;
+}
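+
+/*
+ * Race example (illustrative, assuming a low-then-high split readq):
+ * the counter sits at 0x1_ffff_ffff; readq() fetches low 0xffffffff,
+ * the counter carries, and the high half reads back 2, yielding a
+ * bogus 0x2_ffff_ffff. The follow-up readl() then returns a small low
+ * word, so ((u32)v) > low catches the tear and the loop rereads.
+ */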
+
+static int basic_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (event <= BASIC_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 basic_read_event_counter(struct fme_perf_priv *priv,
+ u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+
+ return fme_read_perf_cntr_reg(base + CLK_CNTR);
+}
+
+static int cache_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= CACHE_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 cache_read_event_counter(struct fme_perf_priv *priv,
+ u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v, count;
+ u8 channel;
+
+ if (event == CACHE_EVNT_WR_HIT || event == CACHE_EVNT_WR_MISS ||
+ event == CACHE_EVNT_DATA_WR_PORT_CONTEN ||
+ event == CACHE_EVNT_TAG_WR_PORT_CONTEN)
+ channel = CACHE_CHANNEL_WR;
+ else
+ channel = CACHE_CHANNEL_RD;
+
+ /* set channel access type and cache event code. */
+ v = readq(base + CACHE_CTRL);
+ v &= ~(CACHE_CHANNEL_SEL | CACHE_CTRL_EVNT);
+ v |= FIELD_PREP(CACHE_CHANNEL_SEL, channel);
+ v |= FIELD_PREP(CACHE_CTRL_EVNT, event);
+ writeq(v, base + CACHE_CTRL);
+
+ if (readq_poll_timeout_atomic(base + CACHE_CNTR0, v,
+ FIELD_GET(CACHE_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched cache event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + CACHE_CNTR0);
+ count = FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+ v = fme_read_perf_cntr_reg(base + CACHE_CNTR1);
+ count += FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+
+ return count;
+}
+
+static bool is_fabric_event_supported(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ if (event > FAB_EVNT_MAX || !is_portid_root_or_port(portid))
+ return false;
+
+ if (priv->id == FME_FEATURE_ID_GLOBAL_DPERF &&
+ (event == FAB_EVNT_PCIE1_RD || event == FAB_EVNT_UPI_RD ||
+ event == FAB_EVNT_PCIE1_WR || event == FAB_EVNT_UPI_WR))
+ return false;
+
+ return true;
+}
+
+static int fabric_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ int ret = 0;
+ u64 v;
+
+ if (!is_fabric_event_supported(priv, event, portid))
+ return -EINVAL;
+
+	/*
+	 * The fabric counter set can only be in either overall or port mode.
+	 * In overall mode it counts overall data for the FPGA; in port mode
+	 * it is configured to monitor one individual port.
+	 *
+	 * So every time a new event is initialized, the driver checks the
+	 * current working mode and whether someone else is already using
+	 * this counter set.
+	 */
+ spin_lock(&priv->fab_lock);
+ if (priv->fab_users && priv->fab_port_id != portid) {
+		dev_dbg(priv->dev, "conflicting fabric event monitoring mode\n");
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ priv->fab_users++;
+
+ /*
+ * skip if current working mode matches, otherwise change the working
+ * mode per input port_id, to monitor overall data or another port.
+ */
+ if (priv->fab_port_id == portid)
+ goto exit;
+
+ priv->fab_port_id = portid;
+
+ v = readq(base + FAB_CTRL);
+ v &= ~(FAB_PORT_FILTER | FAB_PORT_ID);
+
+ if (is_portid_root(portid)) {
+ v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_DISABLE);
+ } else {
+ v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_ENABLE);
+ v |= FIELD_PREP(FAB_PORT_ID, portid);
+ }
+ writeq(v, base + FAB_CTRL);
+
+exit:
+ spin_unlock(&priv->fab_lock);
+ return ret;
+}
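+
+/*
+ * Illustrative conflict (not from the original patch): if one perf session
+ * is counting a root-mode fabric event (portid 0xff) and a second session
+ * then requests a port-filtered fabric event (say portid 0x0), the init
+ * above sees fab_users > 0 with a different fab_port_id and fails with
+ * -EOPNOTSUPP until the first session releases the counter set.
+ */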
+
+static void fabric_event_destroy(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ spin_lock(&priv->fab_lock);
+ priv->fab_users--;
+ spin_unlock(&priv->fab_lock);
+}
+
+static u64 fabric_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ v = readq(base + FAB_CTRL);
+ v &= ~FAB_CTRL_EVNT;
+ v |= FIELD_PREP(FAB_CTRL_EVNT, event);
+ writeq(v, base + FAB_CTRL);
+
+ if (readq_poll_timeout_atomic(base + FAB_CNTR, v,
+ FIELD_GET(FAB_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched fab event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + FAB_CNTR);
+ return FIELD_GET(FAB_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= VTD_EVNT_MAX && is_portid_port(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 vtd_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ event += (portid * (VTD_EVNT_MAX + 1));
+
+ v = readq(base + VTD_CTRL);
+ v &= ~VTD_CTRL_EVNT;
+ v |= FIELD_PREP(VTD_CTRL_EVNT, event);
+ writeq(v, base + VTD_CTRL);
+
+ if (readq_poll_timeout_atomic(base + VTD_CNTR, v,
+ FIELD_GET(VTD_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched vtd event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + VTD_CNTR);
+ return FIELD_GET(VTD_CNTR_EVNT_CNTR, v);
+}
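+
+/*
+ * Note on the index computed above (illustrative): the hardware lays the
+ * per-port VTD event tables out back to back, so port N selects hardware
+ * event code "event + N * (VTD_EVNT_MAX + 1)". Assuming the seven per-port
+ * VTD events this driver enumerates (i.e. VTD_EVNT_MAX == 6), port 1's
+ * event code 0x0 would select hardware event 0x7.
+ */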
+
+static int vtd_sip_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= VTD_SIP_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 vtd_sip_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ v = readq(base + VTD_SIP_CTRL);
+ v &= ~VTD_SIP_CTRL_EVNT;
+ v |= FIELD_PREP(VTD_SIP_CTRL_EVNT, event);
+ writeq(v, base + VTD_SIP_CTRL);
+
+ if (readq_poll_timeout_atomic(base + VTD_SIP_CNTR, v,
+ FIELD_GET(VTD_SIP_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched vtd sip event code in counter register\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + VTD_SIP_CNTR);
+ return FIELD_GET(VTD_SIP_CNTR_EVNT_CNTR, v);
+}
+
+static struct fme_perf_event_ops fme_perf_event_ops[] = {
+ [FME_EVTYPE_BASIC] = {.event_init = basic_event_init,
+ .read_counter = basic_read_event_counter,},
+ [FME_EVTYPE_CACHE] = {.event_init = cache_event_init,
+ .read_counter = cache_read_event_counter,},
+ [FME_EVTYPE_FABRIC] = {.event_init = fabric_event_init,
+ .event_destroy = fabric_event_destroy,
+ .read_counter = fabric_read_event_counter,},
+ [FME_EVTYPE_VTD] = {.event_init = vtd_event_init,
+ .read_counter = vtd_read_event_counter,},
+ [FME_EVTYPE_VTD_SIP] = {.event_init = vtd_sip_event_init,
+ .read_counter = vtd_sip_read_event_counter,},
+};
+
+static ssize_t fme_perf_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+ unsigned long config;
+ char *ptr = buf;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ config = (unsigned long)eattr->var;
+
+ ptr += sprintf(ptr, "event=0x%02x", (unsigned int)get_event(config));
+ ptr += sprintf(ptr, ",evtype=0x%02x", (unsigned int)get_evtype(config));
+
+ if (is_portid_root(get_portid(config)))
+ ptr += sprintf(ptr, ",portid=0x%02x\n", FME_PORTID_ROOT);
+ else
+ ptr += sprintf(ptr, ",portid=?\n");
+
+ return (ssize_t)(ptr - buf);
+}
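+
+/*
+ * Illustrative sysfs output (not from the original patch): the show routine
+ * above emits, e.g., "event=0x01,evtype=0x01,portid=0xff" for a root-port
+ * event; port-filtered events print "portid=?" so that the user must
+ * supply the port id explicitly when opening the event.
+ */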
+
+#define FME_EVENT_ATTR(_name) \
+ __ATTR(_name, 0444, fme_perf_event_show, NULL)
+
+#define FME_PORT_EVENT_CONFIG(_event, _type) \
+ (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \
+ (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK))
+
+#define FME_EVENT_CONFIG(_event, _type) \
+ (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \
+ (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK) | \
+ (FME_PORTID_ROOT << FME_PORTID_SHIFT))
+
+/* FME Perf Basic Events */
+#define FME_EVENT_BASIC(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_##_name = { \
+ .attr = FME_EVENT_ATTR(_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_BASIC), \
+}
+
+FME_EVENT_BASIC(clock, BASIC_EVNT_CLK);
+
+static struct attribute *fme_perf_basic_events_attrs[] = {
+ &fme_perf_event_clock.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_basic_events_group = {
+ .name = "events",
+ .attrs = fme_perf_basic_events_attrs,
+};
+
+/* FME Perf Cache Events */
+#define FME_EVENT_CACHE(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_cache_##_name = { \
+ .attr = FME_EVENT_ATTR(cache_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_CACHE), \
+}
+
+FME_EVENT_CACHE(read_hit, CACHE_EVNT_RD_HIT);
+FME_EVENT_CACHE(read_miss, CACHE_EVNT_RD_MISS);
+FME_EVENT_CACHE(write_hit, CACHE_EVNT_WR_HIT);
+FME_EVENT_CACHE(write_miss, CACHE_EVNT_WR_MISS);
+FME_EVENT_CACHE(hold_request, CACHE_EVNT_HOLD_REQ);
+FME_EVENT_CACHE(tx_req_stall, CACHE_EVNT_TX_REQ_STALL);
+FME_EVENT_CACHE(rx_req_stall, CACHE_EVNT_RX_REQ_STALL);
+FME_EVENT_CACHE(eviction, CACHE_EVNT_EVICTIONS);
+FME_EVENT_CACHE(data_write_port_contention, CACHE_EVNT_DATA_WR_PORT_CONTEN);
+FME_EVENT_CACHE(tag_write_port_contention, CACHE_EVNT_TAG_WR_PORT_CONTEN);
+
+static struct attribute *fme_perf_cache_events_attrs[] = {
+ &fme_perf_event_cache_read_hit.attr.attr,
+ &fme_perf_event_cache_read_miss.attr.attr,
+ &fme_perf_event_cache_write_hit.attr.attr,
+ &fme_perf_event_cache_write_miss.attr.attr,
+ &fme_perf_event_cache_hold_request.attr.attr,
+ &fme_perf_event_cache_tx_req_stall.attr.attr,
+ &fme_perf_event_cache_rx_req_stall.attr.attr,
+ &fme_perf_event_cache_eviction.attr.attr,
+ &fme_perf_event_cache_data_write_port_contention.attr.attr,
+ &fme_perf_event_cache_tag_write_port_contention.attr.attr,
+ NULL,
+};
+
+static umode_t fme_perf_events_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+
+ return (priv->id == FME_FEATURE_ID_GLOBAL_IPERF) ? attr->mode : 0;
+}
+
+static const struct attribute_group fme_perf_cache_events_group = {
+ .name = "events",
+ .attrs = fme_perf_cache_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf Fabric Events */
+#define FME_EVENT_FABRIC(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_fab_##_name = { \
+ .attr = FME_EVENT_ATTR(fab_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
+}
+
+#define FME_EVENT_FABRIC_PORT(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_fab_port_##_name = { \
+ .attr = FME_EVENT_ATTR(fab_port_##_name), \
+ .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
+}
+
+FME_EVENT_FABRIC(pcie0_read, FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC(pcie1_read, FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC(upi_read, FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC(upi_write, FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC(mmio_read, FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC(mmio_write, FAB_EVNT_MMIO_WR);
+
+FME_EVENT_FABRIC_PORT(pcie0_read, FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC_PORT(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC_PORT(pcie1_read, FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC_PORT(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC_PORT(upi_read, FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC_PORT(upi_write, FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC_PORT(mmio_read, FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC_PORT(mmio_write, FAB_EVNT_MMIO_WR);
+
+static struct attribute *fme_perf_fabric_events_attrs[] = {
+ &fme_perf_event_fab_pcie0_read.attr.attr,
+ &fme_perf_event_fab_pcie0_write.attr.attr,
+ &fme_perf_event_fab_pcie1_read.attr.attr,
+ &fme_perf_event_fab_pcie1_write.attr.attr,
+ &fme_perf_event_fab_upi_read.attr.attr,
+ &fme_perf_event_fab_upi_write.attr.attr,
+ &fme_perf_event_fab_mmio_read.attr.attr,
+ &fme_perf_event_fab_mmio_write.attr.attr,
+ &fme_perf_event_fab_port_pcie0_read.attr.attr,
+ &fme_perf_event_fab_port_pcie0_write.attr.attr,
+ &fme_perf_event_fab_port_pcie1_read.attr.attr,
+ &fme_perf_event_fab_port_pcie1_write.attr.attr,
+ &fme_perf_event_fab_port_upi_read.attr.attr,
+ &fme_perf_event_fab_port_upi_write.attr.attr,
+ &fme_perf_event_fab_port_mmio_read.attr.attr,
+ &fme_perf_event_fab_port_mmio_write.attr.attr,
+ NULL,
+};
+
+static umode_t fme_perf_fabric_events_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+ struct dev_ext_attribute *eattr;
+ unsigned long var;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr.attr);
+ var = (unsigned long)eattr->var;
+
+ if (is_fabric_event_supported(priv, get_event(var), get_portid(var)))
+ return attr->mode;
+
+ return 0;
+}
+
+static const struct attribute_group fme_perf_fabric_events_group = {
+ .name = "events",
+ .attrs = fme_perf_fabric_events_attrs,
+ .is_visible = fme_perf_fabric_events_visible,
+};
+
+/* FME Perf VTD Events */
+#define FME_EVENT_VTD_PORT(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_vtd_port_##_name = { \
+ .attr = FME_EVENT_ATTR(vtd_port_##_name), \
+ .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_VTD), \
+}
+
+FME_EVENT_VTD_PORT(read_transaction, VTD_EVNT_AFU_MEM_RD_TRANS);
+FME_EVENT_VTD_PORT(write_transaction, VTD_EVNT_AFU_MEM_WR_TRANS);
+FME_EVENT_VTD_PORT(devtlb_read_hit, VTD_EVNT_AFU_DEVTLB_RD_HIT);
+FME_EVENT_VTD_PORT(devtlb_write_hit, VTD_EVNT_AFU_DEVTLB_WR_HIT);
+FME_EVENT_VTD_PORT(devtlb_4k_fill, VTD_EVNT_DEVTLB_4K_FILL);
+FME_EVENT_VTD_PORT(devtlb_2m_fill, VTD_EVNT_DEVTLB_2M_FILL);
+FME_EVENT_VTD_PORT(devtlb_1g_fill, VTD_EVNT_DEVTLB_1G_FILL);
+
+static struct attribute *fme_perf_vtd_events_attrs[] = {
+ &fme_perf_event_vtd_port_read_transaction.attr.attr,
+ &fme_perf_event_vtd_port_write_transaction.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_read_hit.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_write_hit.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_4k_fill.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_2m_fill.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_1g_fill.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_events_group = {
+ .name = "events",
+ .attrs = fme_perf_vtd_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf VTD SIP Events */
+#define FME_EVENT_VTD_SIP(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_vtd_sip_##_name = { \
+ .attr = FME_EVENT_ATTR(vtd_sip_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_VTD_SIP), \
+}
+
+FME_EVENT_VTD_SIP(iotlb_4k_hit, VTD_SIP_EVNT_IOTLB_4K_HIT);
+FME_EVENT_VTD_SIP(iotlb_2m_hit, VTD_SIP_EVNT_IOTLB_2M_HIT);
+FME_EVENT_VTD_SIP(iotlb_1g_hit, VTD_SIP_EVNT_IOTLB_1G_HIT);
+FME_EVENT_VTD_SIP(slpwc_l3_hit, VTD_SIP_EVNT_SLPWC_L3_HIT);
+FME_EVENT_VTD_SIP(slpwc_l4_hit, VTD_SIP_EVNT_SLPWC_L4_HIT);
+FME_EVENT_VTD_SIP(rcc_hit, VTD_SIP_EVNT_RCC_HIT);
+FME_EVENT_VTD_SIP(iotlb_4k_miss, VTD_SIP_EVNT_IOTLB_4K_MISS);
+FME_EVENT_VTD_SIP(iotlb_2m_miss, VTD_SIP_EVNT_IOTLB_2M_MISS);
+FME_EVENT_VTD_SIP(iotlb_1g_miss, VTD_SIP_EVNT_IOTLB_1G_MISS);
+FME_EVENT_VTD_SIP(slpwc_l3_miss, VTD_SIP_EVNT_SLPWC_L3_MISS);
+FME_EVENT_VTD_SIP(slpwc_l4_miss, VTD_SIP_EVNT_SLPWC_L4_MISS);
+FME_EVENT_VTD_SIP(rcc_miss, VTD_SIP_EVNT_RCC_MISS);
+
+static struct attribute *fme_perf_vtd_sip_events_attrs[] = {
+ &fme_perf_event_vtd_sip_iotlb_4k_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_2m_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_1g_hit.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l3_hit.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l4_hit.attr.attr,
+ &fme_perf_event_vtd_sip_rcc_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_4k_miss.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_2m_miss.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_1g_miss.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l3_miss.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l4_miss.attr.attr,
+ &fme_perf_event_vtd_sip_rcc_miss.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_sip_events_group = {
+ .name = "events",
+ .attrs = fme_perf_vtd_sip_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+static const struct attribute_group *fme_perf_events_groups[] = {
+ &fme_perf_basic_events_group,
+ &fme_perf_cache_events_group,
+ &fme_perf_fabric_events_group,
+ &fme_perf_vtd_events_group,
+ &fme_perf_vtd_sip_events_group,
+ NULL,
+};
+
+static struct fme_perf_event_ops *get_event_ops(u32 evtype)
+{
+ if (evtype > FME_EVTYPE_MAX)
+ return NULL;
+
+ return &fme_perf_event_ops[evtype];
+}
+
+static void fme_perf_event_destroy(struct perf_event *event)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+
+ if (ops->event_destroy)
+ ops->event_destroy(priv, event->hw.idx, event->hw.config_base);
+}
+
+static int fme_perf_event_init(struct perf_event *event)
+{
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct fme_perf_event_ops *ops;
+ u32 eventid, evtype, portid;
+
+	/* check the event attr type for PMU enumeration */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+	/*
+	 * FME counters are shared across all cores, so per-task
+	 * (per-process) counting is not supported. Event sampling
+	 * mode is not supported either.
+	 */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ if (event->cpu != priv->cpu)
+ return -EINVAL;
+
+ eventid = get_event(event->attr.config);
+ portid = get_portid(event->attr.config);
+ evtype = get_evtype(event->attr.config);
+ if (evtype > FME_EVTYPE_MAX)
+ return -EINVAL;
+
+ hwc->event_base = evtype;
+ hwc->idx = (int)eventid;
+ hwc->config_base = portid;
+
+ event->destroy = fme_perf_event_destroy;
+
+	dev_dbg(priv->dev, "%s event=0x%x, evtype=0x%x, portid=0x%x\n",
+ __func__, eventid, evtype, portid);
+
+ ops = get_event_ops(evtype);
+ if (ops->event_init)
+ return ops->event_init(priv, eventid, portid);
+
+ return 0;
+}
+
+static void fme_perf_event_update(struct perf_event *event)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 now, prev, delta;
+
+ now = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+ prev = local64_read(&hwc->prev_count);
+ delta = now - prev;
+
+ local64_add(delta, &event->count);
+}
+
+static void fme_perf_event_start(struct perf_event *event, int flags)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count;
+
+ count = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+ local64_set(&hwc->prev_count, count);
+}
+
+static void fme_perf_event_stop(struct perf_event *event, int flags)
+{
+ fme_perf_event_update(event);
+}
+
+static int fme_perf_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ fme_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void fme_perf_event_del(struct perf_event *event, int flags)
+{
+ fme_perf_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void fme_perf_event_read(struct perf_event *event)
+{
+ fme_perf_event_update(event);
+}
+
+static void fme_perf_setup_hardware(struct fme_perf_priv *priv)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ /* read and save current working mode for fabric counters */
+ v = readq(base + FAB_CTRL);
+
+ if (FIELD_GET(FAB_PORT_FILTER, v) == FAB_PORT_FILTER_DISABLE)
+ priv->fab_port_id = FME_PORTID_ROOT;
+ else
+ priv->fab_port_id = FIELD_GET(FAB_PORT_ID, v);
+}
+
+static int fme_perf_pmu_register(struct platform_device *pdev,
+ struct fme_perf_priv *priv)
+{
+ struct pmu *pmu = &priv->pmu;
+ char *name;
+ int ret;
+
+ spin_lock_init(&priv->fab_lock);
+
+ fme_perf_setup_hardware(priv);
+
+ pmu->task_ctx_nr = perf_invalid_context;
+ pmu->attr_groups = fme_perf_groups;
+ pmu->attr_update = fme_perf_events_groups;
+ pmu->event_init = fme_perf_event_init;
+ pmu->add = fme_perf_event_add;
+ pmu->del = fme_perf_event_del;
+ pmu->start = fme_perf_event_start;
+ pmu->stop = fme_perf_event_stop;
+ pmu->read = fme_perf_event_read;
+ pmu->capabilities = PERF_PMU_CAP_NO_INTERRUPT |
+ PERF_PMU_CAP_NO_EXCLUDE;
+
+	name = devm_kasprintf(priv->dev, GFP_KERNEL, "dfl_fme%d", pdev->id);
+	if (!name)
+		return -ENOMEM;
+
+	ret = perf_pmu_register(pmu, name, -1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void fme_perf_pmu_unregister(struct fme_perf_priv *priv)
+{
+ perf_pmu_unregister(&priv->pmu);
+}
+
+static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct fme_perf_priv *priv;
+ int target;
+
+ priv = hlist_entry_safe(node, struct fme_perf_priv, node);
+
+ if (cpu != priv->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ priv->cpu = target;
+ perf_pmu_migrate_context(&priv->pmu, cpu, target);
+
+ return 0;
+}
+
+static int fme_perf_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct fme_perf_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ priv->ioaddr = feature->ioaddr;
+ priv->id = feature->id;
+ priv->cpu = raw_smp_processor_id();
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/fpga/dfl_fme:online",
+ NULL, fme_perf_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ priv->cpuhp_state = ret;
+
+ /* Register the pmu instance for cpu hotplug */
+ ret = cpuhp_state_add_instance_nocalls(priv->cpuhp_state, &priv->node);
+ if (ret)
+ goto cpuhp_instance_err;
+
+ ret = fme_perf_pmu_register(pdev, priv);
+ if (ret)
+ goto pmu_register_err;
+
+ feature->priv = priv;
+ return 0;
+
+pmu_register_err:
+ cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(priv->cpuhp_state);
+ return ret;
+}
+
+static void fme_perf_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct fme_perf_priv *priv = feature->priv;
+
+ fme_perf_pmu_unregister(priv);
+ cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+ cpuhp_remove_multi_state(priv->cpuhp_state);
+}
+
+const struct dfl_feature_id fme_perf_id_table[] = {
+ {.id = FME_FEATURE_ID_GLOBAL_IPERF,},
+ {.id = FME_FEATURE_ID_GLOBAL_DPERF,},
+ {0,}
+};
+
+const struct dfl_feature_ops fme_perf_ops = {
+ .init = fme_perf_init,
+ .uinit = fme_perf_uinit,
+};
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
new file mode 100644
index 0000000000..cdcf6dea4c
--- /dev/null
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME) Partial Reconfiguration
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Christopher Rauer <christopher.rauer@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+#include "dfl-fme-pr.h"
+
+static struct dfl_fme_region *
+dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
+{
+ struct dfl_fme_region *fme_region;
+
+ list_for_each_entry(fme_region, &fme->region_list, node)
+ if (fme_region->port_id == port_id)
+ return fme_region;
+
+ return NULL;
+}
+
+static int dfl_fme_region_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
+{
+ struct dfl_fme_region *fme_region;
+ struct fpga_region *region;
+
+ fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
+ if (!fme_region)
+ return NULL;
+
+ region = fpga_region_class_find(NULL, &fme_region->region->dev,
+ dfl_fme_region_match);
+ if (!region)
+ return NULL;
+
+ return region;
+}
+
+static int fme_pr(struct platform_device *pdev, unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __user *argp = (void __user *)arg;
+ struct dfl_fpga_fme_port_pr port_pr;
+ struct fpga_image_info *info;
+ struct fpga_region *region;
+ void __iomem *fme_hdr;
+ struct dfl_fme *fme;
+ unsigned long minsz;
+ void *buf = NULL;
+ size_t length;
+ int ret = 0;
+ u64 v;
+
+ minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);
+
+ if (copy_from_user(&port_pr, argp, minsz))
+ return -EFAULT;
+
+ if (port_pr.argsz < minsz || port_pr.flags)
+ return -EINVAL;
+
+ /* get fme header region */
+ fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
+ FME_FEATURE_ID_HEADER);
+
+ /* check port id */
+ v = readq(fme_hdr + FME_HDR_CAP);
+ if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
+		dev_dbg(&pdev->dev, "port id exceeds the number of ports\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * Align the PR buffer to the PR data-path width (4 bytes); the HW
+	 * ignores the extra padding data automatically.
+	 */
+ length = ALIGN(port_pr.buffer_size, 4);
+
+ buf = vmalloc(length);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf,
+ (void __user *)(unsigned long)port_pr.buffer_address,
+ port_pr.buffer_size)) {
+ ret = -EFAULT;
+ goto free_exit;
+ }
+
+ /* prepare fpga_image_info for PR */
+ info = fpga_image_info_alloc(&pdev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto free_exit;
+ }
+
+ info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+ mutex_lock(&pdata->lock);
+ fme = dfl_fpga_pdata_get_private(pdata);
+ /* fme device has been unregistered. */
+ if (!fme) {
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ region = dfl_fme_region_find(fme, port_pr.port_id);
+ if (!region) {
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ fpga_image_info_free(region->info);
+
+ info->buf = buf;
+ info->count = length;
+ info->region_id = port_pr.port_id;
+ region->info = info;
+
+ ret = fpga_region_program_fpga(region);
+
+	/*
+	 * Userspace is allowed to reset the PR region's logic by disabling
+	 * and re-enabling the bridge to clear things out between acceleration
+	 * runs, so there is no need to hold the bridges after partial
+	 * reconfiguration.
+	 */
+ if (region->get_bridges)
+ fpga_bridges_put(&region->bridge_list);
+
+ put_device(&region->dev);
+unlock_exit:
+ mutex_unlock(&pdata->lock);
+free_exit:
+ vfree(buf);
+ return ret;
+}
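+
+/*
+ * Illustrative userspace sketch (not part of the original patch) of the
+ * DFL_FPGA_FME_PORT_PR ioctl handled above, using the uapi structure from
+ * <linux/fpga-dfl.h>; fme_fd is an assumed, already-opened fd of the FME
+ * character device, and error handling is omitted:
+ *
+ *	struct dfl_fpga_fme_port_pr port_pr = {
+ *		.argsz = sizeof(port_pr),
+ *		.flags = 0,
+ *		.port_id = 0,
+ *		.buffer_size = bitstream_len,
+ *		.buffer_address = (__u64)(uintptr_t)bitstream_buf,
+ *	};
+ *	ioctl(fme_fd, DFL_FPGA_FME_PORT_PR, &port_pr);
+ *
+ * flags must be zero and port_id below the FME_CAP_NUM_PORTS limit, per
+ * the checks above.
+ */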
+
+/**
+ * dfl_fme_create_mgr - create fpga mgr platform device as child device
+ * @pdata: fme platform_device's pdata
+ * @feature: sub feature info
+ *
+ * Return: mgr platform device if successful, and error code otherwise.
+ */
+static struct platform_device *
+dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
+ struct dfl_feature *feature)
+{
+ struct platform_device *mgr, *fme = pdata->dev;
+ struct dfl_fme_mgr_pdata mgr_pdata;
+ int ret = -ENOMEM;
+
+ if (!feature->ioaddr)
+ return ERR_PTR(-ENODEV);
+
+ mgr_pdata.ioaddr = feature->ioaddr;
+
+ /*
+ * Each FME has only one fpga-mgr, so allocate platform device using
+ * the same FME platform device id.
+ */
+ mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
+ if (!mgr)
+ return ERR_PTR(ret);
+
+ mgr->dev.parent = &fme->dev;
+
+ ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
+ if (ret)
+ goto create_mgr_err;
+
+ ret = platform_device_add(mgr);
+ if (ret)
+ goto create_mgr_err;
+
+ return mgr;
+
+create_mgr_err:
+ platform_device_put(mgr);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_mgr - destroy fpga mgr platform device
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+
+ platform_device_unregister(priv->mgr);
+}
+
+/**
+ * dfl_fme_create_bridge - create fme fpga bridge platform device as child
+ *
+ * @pdata: fme platform device's pdata
+ * @port_id: port id for the bridge to be created.
+ *
+ * Return: bridge platform device if successful, and error code otherwise.
+ */
+static struct dfl_fme_bridge *
+dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
+{
+ struct device *dev = &pdata->dev->dev;
+ struct dfl_fme_br_pdata br_pdata;
+ struct dfl_fme_bridge *fme_br;
+ int ret = -ENOMEM;
+
+ fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
+ if (!fme_br)
+ return ERR_PTR(ret);
+
+ br_pdata.cdev = pdata->dfl_cdev;
+ br_pdata.port_id = port_id;
+
+ fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
+ PLATFORM_DEVID_AUTO);
+ if (!fme_br->br)
+ return ERR_PTR(ret);
+
+ fme_br->br->dev.parent = dev;
+
+ ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
+ if (ret)
+ goto create_br_err;
+
+ ret = platform_device_add(fme_br->br);
+ if (ret)
+ goto create_br_err;
+
+ return fme_br;
+
+create_br_err:
+ platform_device_put(fme_br->br);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_bridge - destroy fpga bridge platform device
+ * @fme_br: fme bridge to destroy
+ */
+static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
+{
+ platform_device_unregister(fme_br->br);
+}
+
+/**
+ * dfl_fme_destroy_bridges - destroy all fpga bridge platform device
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme_bridge *fbridge, *tmp;
+
+ list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
+ list_del(&fbridge->node);
+ dfl_fme_destroy_bridge(fbridge);
+ }
+}
+
+/**
+ * dfl_fme_create_region - create fpga region platform device as child
+ *
+ * @pdata: fme platform device's pdata
+ * @mgr: mgr platform device needed for region
+ * @br: br platform device needed for region
+ * @port_id: port id
+ *
+ * Return: fme region if successful, and error code otherwise.
+ */
+static struct dfl_fme_region *
+dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
+ struct platform_device *mgr,
+ struct platform_device *br, int port_id)
+{
+ struct dfl_fme_region_pdata region_pdata;
+ struct device *dev = &pdata->dev->dev;
+ struct dfl_fme_region *fme_region;
+ int ret = -ENOMEM;
+
+ fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
+ if (!fme_region)
+ return ERR_PTR(ret);
+
+ region_pdata.mgr = mgr;
+ region_pdata.br = br;
+
+ /*
+ * Each FPGA device may have more than one port, so allocate platform
+ * device using the same port platform device id.
+ */
+ fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
+ if (!fme_region->region)
+ return ERR_PTR(ret);
+
+ fme_region->region->dev.parent = dev;
+
+ ret = platform_device_add_data(fme_region->region, &region_pdata,
+ sizeof(region_pdata));
+ if (ret)
+ goto create_region_err;
+
+ ret = platform_device_add(fme_region->region);
+ if (ret)
+ goto create_region_err;
+
+ fme_region->port_id = port_id;
+
+ return fme_region;
+
+create_region_err:
+ platform_device_put(fme_region->region);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_region - destroy fme region
+ * @fme_region: fme region to destroy
+ */
+static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
+{
+ platform_device_unregister(fme_region->region);
+}
+
+/**
+ * dfl_fme_destroy_regions - destroy all fme regions
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme_region *fme_region, *tmp;
+
+ list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
+ list_del(&fme_region->node);
+ dfl_fme_destroy_region(fme_region);
+ }
+}
+
+static int pr_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme_region *fme_region;
+ struct dfl_fme_bridge *fme_br;
+ struct platform_device *mgr;
+ struct dfl_fme *priv;
+ void __iomem *fme_hdr;
+ int ret = -ENODEV, i = 0;
+ u64 fme_cap, port_offset;
+
+ fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
+ FME_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ priv = dfl_fpga_pdata_get_private(pdata);
+
+ /* Initialize the region and bridge sub device list */
+ INIT_LIST_HEAD(&priv->region_list);
+ INIT_LIST_HEAD(&priv->bridge_list);
+
+ /* Create fpga mgr platform device */
+ mgr = dfl_fme_create_mgr(pdata, feature);
+ if (IS_ERR(mgr)) {
+ dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
+ goto unlock;
+ }
+
+ priv->mgr = mgr;
+
+ /* Read capability register to check number of regions and bridges */
+ fme_cap = readq(fme_hdr + FME_HDR_CAP);
+ for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
+ port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
+ if (!(port_offset & FME_PORT_OFST_IMP))
+ continue;
+
+ /* Create bridge for each port */
+ fme_br = dfl_fme_create_bridge(pdata, i);
+ if (IS_ERR(fme_br)) {
+ ret = PTR_ERR(fme_br);
+ goto destroy_region;
+ }
+
+ list_add(&fme_br->node, &priv->bridge_list);
+
+ /* Create region for each port */
+ fme_region = dfl_fme_create_region(pdata, mgr,
+ fme_br->br, i);
+ if (IS_ERR(fme_region)) {
+ ret = PTR_ERR(fme_region);
+ goto destroy_region;
+ }
+
+ list_add(&fme_region->node, &priv->region_list);
+ }
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+
+destroy_region:
+ dfl_fme_destroy_regions(pdata);
+ dfl_fme_destroy_bridges(pdata);
+ dfl_fme_destroy_mgr(pdata);
+unlock:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+static void pr_mgmt_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ mutex_lock(&pdata->lock);
+
+ dfl_fme_destroy_regions(pdata);
+ dfl_fme_destroy_bridges(pdata);
+ dfl_fme_destroy_mgr(pdata);
+ mutex_unlock(&pdata->lock);
+}
+
+static long fme_pr_ioctl(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case DFL_FPGA_FME_PORT_PR:
+ ret = fme_pr(pdev, arg);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_PR_MGMT,},
+ {0}
+};
+
+const struct dfl_feature_ops fme_pr_mgmt_ops = {
+ .init = pr_mgmt_init,
+ .uinit = pr_mgmt_uinit,
+ .ioctl = fme_pr_ioctl,
+};
diff --git a/drivers/fpga/dfl-fme-pr.h b/drivers/fpga/dfl-fme-pr.h
new file mode 100644
index 0000000000..761f80f633
--- /dev/null
+++ b/drivers/fpga/dfl-fme-pr.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Management Engine (FME) Partial Reconfiguration Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_FME_PR_H
+#define __DFL_FME_PR_H
+
+#include <linux/platform_device.h>
+
+/**
+ * struct dfl_fme_region - FME fpga region data structure
+ *
+ * @region: platform device of the FPGA region.
+ * @node: used to link fme_region to a list.
+ * @port_id: indicate which port this region connected to.
+ */
+struct dfl_fme_region {
+ struct platform_device *region;
+ struct list_head node;
+ int port_id;
+};
+
+/**
+ * struct dfl_fme_region_pdata - platform data for FME region platform device.
+ *
+ * @mgr: platform device of the FPGA manager.
+ * @br: platform device of the FPGA bridge.
+ * @region_id: region id (same as port_id).
+ */
+struct dfl_fme_region_pdata {
+ struct platform_device *mgr;
+ struct platform_device *br;
+ int region_id;
+};
+
+/**
+ * struct dfl_fme_bridge - FME fpga bridge data structure
+ *
+ * @br: platform device of the FPGA bridge.
+ * @node: used to link fme_bridge to a list.
+ */
+struct dfl_fme_bridge {
+ struct platform_device *br;
+ struct list_head node;
+};
+
+/**
+ * struct dfl_fme_br_pdata - platform data for FME bridge platform device.
+ *
+ * @cdev: container device.
+ * @port_id: port id.
+ */
+struct dfl_fme_br_pdata {
+ struct dfl_fpga_cdev *cdev;
+ int port_id;
+};
+
+/**
+ * struct dfl_fme_mgr_pdata - platform data for FME manager platform device.
+ *
+ * @ioaddr: mapped io address for FME manager platform device.
+ */
+struct dfl_fme_mgr_pdata {
+ void __iomem *ioaddr;
+};
+
+#define DFL_FPGA_FME_MGR "dfl-fme-mgr"
+#define DFL_FPGA_FME_BRIDGE "dfl-fme-bridge"
+#define DFL_FPGA_FME_REGION "dfl-fme-region"
+
+#endif /* __DFL_FME_PR_H */
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
new file mode 100644
index 0000000000..4aebde0a7f
--- /dev/null
+++ b/drivers/fpga/dfl-fme-region.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Region Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-region.h>
+
+#include "dfl-fme-pr.h"
+
+static int fme_region_get_bridges(struct fpga_region *region)
+{
+ struct dfl_fme_region_pdata *pdata = region->priv;
+ struct device *dev = &pdata->br->dev;
+
+ return fpga_bridge_get_to_list(dev, region->info, &region->bridge_list);
+}
+
+static int fme_region_probe(struct platform_device *pdev)
+{
+ struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct fpga_region_info info = { 0 };
+ struct device *dev = &pdev->dev;
+ struct fpga_region *region;
+ struct fpga_manager *mgr;
+ int ret;
+
+ mgr = fpga_mgr_get(&pdata->mgr->dev);
+ if (IS_ERR(mgr))
+ return -EPROBE_DEFER;
+
+ info.mgr = mgr;
+ info.compat_id = mgr->compat_id;
+ info.get_bridges = fme_region_get_bridges;
+ info.priv = pdata;
+ region = fpga_region_register_full(dev, &info);
+ if (IS_ERR(region)) {
+ ret = PTR_ERR(region);
+ goto eprobe_mgr_put;
+ }
+
+ platform_set_drvdata(pdev, region);
+
+ dev_dbg(dev, "DFL FME FPGA Region probed\n");
+
+ return 0;
+
+eprobe_mgr_put:
+ fpga_mgr_put(mgr);
+ return ret;
+}
+
+static int fme_region_remove(struct platform_device *pdev)
+{
+ struct fpga_region *region = platform_get_drvdata(pdev);
+ struct fpga_manager *mgr = region->mgr;
+
+ fpga_region_unregister(region);
+ fpga_mgr_put(mgr);
+
+ return 0;
+}
+
+static struct platform_driver fme_region_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_REGION,
+ },
+ .probe = fme_region_probe,
+ .remove = fme_region_remove,
+};
+
+module_platform_driver(fme_region_driver);
+
+MODULE_DESCRIPTION("FPGA Region for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-region");
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
new file mode 100644
index 0000000000..4195dd6819
--- /dev/null
+++ b/drivers/fpga/dfl-fme.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Management Engine (FME) Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_FME_H
+#define __DFL_FME_H
+
+/**
+ * struct dfl_fme - dfl fme private data
+ *
+ * @mgr: FME's FPGA manager platform device.
+ * @region_list: linked list of FME's FPGA regions.
+ * @bridge_list: linked list of FME's FPGA bridges.
+ * @pdata: fme platform device's pdata.
+ */
+struct dfl_fme {
+ struct platform_device *mgr;
+ struct list_head region_list;
+ struct list_head bridge_list;
+ struct dfl_feature_platform_data *pdata;
+};
+
+extern const struct dfl_feature_ops fme_pr_mgmt_ops;
+extern const struct dfl_feature_id fme_pr_mgmt_id_table[];
+extern const struct dfl_feature_ops fme_global_err_ops;
+extern const struct dfl_feature_id fme_global_err_id_table[];
+extern const struct attribute_group fme_global_err_group;
+extern const struct dfl_feature_ops fme_perf_ops;
+extern const struct dfl_feature_id fme_perf_id_table[];
+
+#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl-n3000-nios.c b/drivers/fpga/dfl-n3000-nios.c
new file mode 100644
index 0000000000..9ddf1d1d39
--- /dev/null
+++ b/drivers/fpga/dfl-n3000-nios.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DFL device driver for Nios private feature on Intel PAC (Programmable
+ * Acceleration Card) N3000
+ *
+ * Copyright (C) 2019-2020 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xu Yilun <yilun.xu@intel.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/dfl.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/stddef.h>
+#include <linux/spi/altera.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/*
+ * N3000 Nios private feature registers, named NIOS_SPI_XX in the spec.
+ * NS is the abbreviation of NIOS_SPI.
+ */
+#define N3000_NS_PARAM 0x8
+#define N3000_NS_PARAM_SHIFT_MODE_MSK BIT_ULL(1)
+#define N3000_NS_PARAM_SHIFT_MODE_MSB 0
+#define N3000_NS_PARAM_SHIFT_MODE_LSB 1
+#define N3000_NS_PARAM_DATA_WIDTH GENMASK_ULL(7, 2)
+#define N3000_NS_PARAM_NUM_CS GENMASK_ULL(13, 8)
+#define N3000_NS_PARAM_CLK_POL BIT_ULL(14)
+#define N3000_NS_PARAM_CLK_PHASE BIT_ULL(15)
+#define N3000_NS_PARAM_PERIPHERAL_ID GENMASK_ULL(47, 32)
+
+#define N3000_NS_CTRL 0x10
+#define N3000_NS_CTRL_WR_DATA GENMASK_ULL(31, 0)
+#define N3000_NS_CTRL_ADDR GENMASK_ULL(44, 32)
+#define N3000_NS_CTRL_CMD_MSK GENMASK_ULL(63, 62)
+#define N3000_NS_CTRL_CMD_NOP 0
+#define N3000_NS_CTRL_CMD_RD 1
+#define N3000_NS_CTRL_CMD_WR 2
+
+#define N3000_NS_STAT 0x18
+#define N3000_NS_STAT_RD_DATA GENMASK_ULL(31, 0)
+#define N3000_NS_STAT_RW_VAL BIT_ULL(32)
+
+/* Nios handshake registers, indirect access */
+#define N3000_NIOS_INIT 0x1000
+#define N3000_NIOS_INIT_DONE BIT(0)
+#define N3000_NIOS_INIT_START BIT(1)
+/* Mode for retimer A, link 0, the same below */
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK GENMASK(9, 8)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK GENMASK(11, 10)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK GENMASK(13, 12)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK GENMASK(15, 14)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK GENMASK(17, 16)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK GENMASK(19, 18)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK GENMASK(21, 20)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK GENMASK(23, 22)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_NO 0x0
+#define N3000_NIOS_INIT_REQ_FEC_MODE_KR 0x1
+#define N3000_NIOS_INIT_REQ_FEC_MODE_RS 0x2
+
+#define N3000_NIOS_FW_VERSION 0x1004
+#define N3000_NIOS_FW_VERSION_PATCH GENMASK(23, 20)
+#define N3000_NIOS_FW_VERSION_MINOR GENMASK(27, 24)
+#define N3000_NIOS_FW_VERSION_MAJOR GENMASK(31, 28)
+
+/* The retimers used on the Intel PAC N3000 are Parkvale, abbreviated to PKVL */
+#define N3000_NIOS_PKVL_A_MODE_STS 0x1020
+#define N3000_NIOS_PKVL_B_MODE_STS 0x1024
+#define N3000_NIOS_PKVL_MODE_STS_GROUP_MSK GENMASK(15, 8)
+#define N3000_NIOS_PKVL_MODE_STS_GROUP_OK 0x0
+#define N3000_NIOS_PKVL_MODE_STS_ID_MSK GENMASK(7, 0)
+/* When GROUP MASK field == GROUP_OK */
+#define N3000_NIOS_PKVL_MODE_ID_RESET 0x0
+#define N3000_NIOS_PKVL_MODE_ID_4X10G 0x1
+#define N3000_NIOS_PKVL_MODE_ID_4X25G 0x2
+#define N3000_NIOS_PKVL_MODE_ID_2X25G 0x3
+#define N3000_NIOS_PKVL_MODE_ID_2X25G_2X10G 0x4
+#define N3000_NIOS_PKVL_MODE_ID_1X25G 0x5
+
+#define N3000_NIOS_REGBUS_RETRY_COUNT 10000 /* loop count */
+
+#define N3000_NIOS_INIT_TIMEOUT 10000000 /* usec */
+#define N3000_NIOS_INIT_TIME_INTV 100000 /* usec */
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_MSK_ALL \
+ (N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK)
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_NO_ALL \
+ (FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO))
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_KR_ALL \
+ (FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR))
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_RS_ALL \
+ (FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS))
+
+struct n3000_nios {
+ void __iomem *base;
+ struct regmap *regmap;
+ struct device *dev;
+ struct platform_device *altera_spi;
+};
+
+static ssize_t nios_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%x.%x.%x\n",
+ (u8)FIELD_GET(N3000_NIOS_FW_VERSION_MAJOR, val),
+ (u8)FIELD_GET(N3000_NIOS_FW_VERSION_MINOR, val),
+ (u8)FIELD_GET(N3000_NIOS_FW_VERSION_PATCH, val));
+}
+static DEVICE_ATTR_RO(nios_fw_version);
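+
+/*
+ * For example (illustrative): firmware version 3.0.0 is reported through
+ * the attribute above as "3.0.0", with major taken from bits [31:28],
+ * minor from bits [27:24] and patch from bits [23:20] of
+ * N3000_NIOS_FW_VERSION.
+ */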
+
+#define IS_MODE_STATUS_OK(mode_stat) \
+ (FIELD_GET(N3000_NIOS_PKVL_MODE_STS_GROUP_MSK, (mode_stat)) == \
+ N3000_NIOS_PKVL_MODE_STS_GROUP_OK)
+
+#define IS_RETIMER_FEC_SUPPORTED(retimer_mode) \
+ ((retimer_mode) != N3000_NIOS_PKVL_MODE_ID_RESET && \
+ (retimer_mode) != N3000_NIOS_PKVL_MODE_ID_4X10G)
+
+static int get_retimer_mode(struct n3000_nios *nn, unsigned int mode_stat_reg,
+ unsigned int *retimer_mode)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(nn->regmap, mode_stat_reg, &val);
+ if (ret)
+ return ret;
+
+ if (!IS_MODE_STATUS_OK(val))
+ return -EFAULT;
+
+ *retimer_mode = FIELD_GET(N3000_NIOS_PKVL_MODE_STS_ID_MSK, val);
+
+ return 0;
+}
+
+static ssize_t retimer_A_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ unsigned int mode;
+ int ret;
+
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_A_MODE_STS, &mode);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", mode);
+}
+static DEVICE_ATTR_RO(retimer_A_mode);
+
+static ssize_t retimer_B_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ unsigned int mode;
+ int ret;
+
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_B_MODE_STS, &mode);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", mode);
+}
+static DEVICE_ATTR_RO(retimer_B_mode);
+
+static ssize_t fec_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int val, retimer_a_mode, retimer_b_mode, fec_modes;
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ int ret;
+
+ /* FEC mode setting is not supported in early FW versions */
+ ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(N3000_NIOS_FW_VERSION_MAJOR, val) < 3)
+ return sysfs_emit(buf, "not supported\n");
+
+ /* If no 25G links, FEC mode setting is not supported either */
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_A_MODE_STS, &retimer_a_mode);
+ if (ret)
+ return ret;
+
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_B_MODE_STS, &retimer_b_mode);
+ if (ret)
+ return ret;
+
+ if (!IS_RETIMER_FEC_SUPPORTED(retimer_a_mode) &&
+ !IS_RETIMER_FEC_SUPPORTED(retimer_b_mode))
+ return sysfs_emit(buf, "not supported\n");
+
+ /* get the valid FEC mode for 25G links */
+ ret = regmap_read(nn->regmap, N3000_NIOS_INIT, &val);
+ if (ret)
+ return ret;
+
+	/*
+	 * FEC mode should always be the same for all links, since the
+	 * driver sets them that way.
+	 */
+ fec_modes = (val & N3000_NIOS_INIT_REQ_FEC_MODE_MSK_ALL);
+ if (fec_modes == N3000_NIOS_INIT_REQ_FEC_MODE_NO_ALL)
+ return sysfs_emit(buf, "no\n");
+ else if (fec_modes == N3000_NIOS_INIT_REQ_FEC_MODE_KR_ALL)
+ return sysfs_emit(buf, "kr\n");
+ else if (fec_modes == N3000_NIOS_INIT_REQ_FEC_MODE_RS_ALL)
+ return sysfs_emit(buf, "rs\n");
+
+ return -EFAULT;
+}
+static DEVICE_ATTR_RO(fec_mode);
+
+static struct attribute *n3000_nios_attrs[] = {
+ &dev_attr_nios_fw_version.attr,
+ &dev_attr_retimer_A_mode.attr,
+ &dev_attr_retimer_B_mode.attr,
+ &dev_attr_fec_mode.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(n3000_nios);
+
+static int n3000_nios_init_done_check(struct n3000_nios *nn)
+{
+ unsigned int val, state_a, state_b;
+ struct device *dev = nn->dev;
+ int ret, ret2;
+
+	/*
+	 * The SPI master is shared with the Nios core inside the FPGA. Nios
+	 * uses it to do some one-time initialization after power up, and
+	 * then releases control to the OS. The driver needs to poll
+	 * INIT_DONE to see when it can take control.
+	 *
+	 * Note that since Nios firmware version 3.0.0, INIT_START is
+	 * introduced, so the driver needs to trigger START first and then
+	 * check INIT_DONE.
+	 */
+
+ ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val);
+ if (ret)
+ return ret;
+
+	/*
+	 * If the Nios version register is completely uninitialized (== 0x0),
+	 * the Nios firmware is missing. The host can then take control of
+	 * the SPI master safely, but the initialization work for Nios is not
+	 * done. To restore the card, a new Nios firmware must be
+	 * reprogrammed via the BMC chip on the SPI bus. So the driver
+	 * doesn't error out; it continues to create the spi controller
+	 * device and spi_board_info for the BMC.
+	 */
+ if (val == 0) {
+ dev_err(dev, "Nios version reg = 0x%x, skip INIT_DONE check, but the retimer may be uninitialized\n",
+ val);
+ return 0;
+ }
+
+ if (FIELD_GET(N3000_NIOS_FW_VERSION_MAJOR, val) >= 3) {
+ /* read NIOS_INIT to check if retimer initialization is done */
+ ret = regmap_read(nn->regmap, N3000_NIOS_INIT, &val);
+ if (ret)
+ return ret;
+
+ /* check if retimers are initialized already */
+ if (val & (N3000_NIOS_INIT_DONE | N3000_NIOS_INIT_START))
+ goto nios_init_done;
+
+ /* configure FEC mode per module param */
+ val = N3000_NIOS_INIT_START;
+
+		/*
+		 * When the retimer is to be set to 10G mode, there is no FEC
+		 * mode setting, so the REQ_FEC_MODE field will be ignored by
+		 * the Nios firmware in that case. But we should still fill
+		 * the FEC mode field, because the host cannot know the
+		 * retimer working mode until the Nios init is done.
+		 *
+		 * For now the driver doesn't support switching the retimer
+		 * FEC mode on user request; it is always set to Reed-Solomon
+		 * FEC.
+		 *
+		 * The driver sets the same FEC mode for all links.
+		 */
+ val |= N3000_NIOS_INIT_REQ_FEC_MODE_RS_ALL;
+
+ ret = regmap_write(nn->regmap, N3000_NIOS_INIT, val);
+ if (ret)
+ return ret;
+ }
+
+nios_init_done:
+ /* polls on NIOS_INIT_DONE */
+ ret = regmap_read_poll_timeout(nn->regmap, N3000_NIOS_INIT, val,
+ val & N3000_NIOS_INIT_DONE,
+ N3000_NIOS_INIT_TIME_INTV,
+ N3000_NIOS_INIT_TIMEOUT);
+ if (ret)
+ dev_err(dev, "NIOS_INIT_DONE %s\n",
+ (ret == -ETIMEDOUT) ? "timed out" : "check error");
+
+ ret2 = regmap_read(nn->regmap, N3000_NIOS_PKVL_A_MODE_STS, &state_a);
+ if (ret2)
+ return ret2;
+
+ ret2 = regmap_read(nn->regmap, N3000_NIOS_PKVL_B_MODE_STS, &state_b);
+ if (ret2)
+ return ret2;
+
+ if (!ret) {
+		/*
+		 * After INIT_DONE is detected, the driver still needs to
+		 * check whether the Nios firmware reported any error during
+		 * the retimer configuration.
+		 */
+ if (IS_MODE_STATUS_OK(state_a) && IS_MODE_STATUS_OK(state_b))
+ return 0;
+
+		/*
+		 * If the retimer configuration failed, the Nios firmware
+		 * will still release the spi controller so the host can
+		 * communicate with the BMC. That makes it possible to
+		 * reprogram a new Nios firmware and restore the card. So the
+		 * driver doesn't error out; it continues to create the spi
+		 * controller device and spi_board_info for the BMC.
+		 */
+ dev_err(dev, "NIOS_INIT_DONE OK, but err on retimer init\n");
+ }
+
+ dev_err(nn->dev, "PKVL_A_MODE_STS 0x%x\n", state_a);
+ dev_err(nn->dev, "PKVL_B_MODE_STS 0x%x\n", state_b);
+
+ return ret;
+}
+
+static struct spi_board_info m10_n3000_info = {
+ .modalias = "m10-n3000",
+ .max_speed_hz = 12500000,
+ .bus_num = 0,
+ .chip_select = 0,
+};
+
+static int create_altera_spi_controller(struct n3000_nios *nn)
+{
+ struct altera_spi_platform_data pdata = { 0 };
+ struct platform_device_info pdevinfo = { 0 };
+ void __iomem *base = nn->base;
+ u64 v;
+
+ v = readq(base + N3000_NS_PARAM);
+
+ pdata.mode_bits = SPI_CS_HIGH;
+ if (FIELD_GET(N3000_NS_PARAM_CLK_POL, v))
+ pdata.mode_bits |= SPI_CPOL;
+ if (FIELD_GET(N3000_NS_PARAM_CLK_PHASE, v))
+ pdata.mode_bits |= SPI_CPHA;
+
+ pdata.num_chipselect = FIELD_GET(N3000_NS_PARAM_NUM_CS, v);
+ pdata.bits_per_word_mask =
+ SPI_BPW_RANGE_MASK(1, FIELD_GET(N3000_NS_PARAM_DATA_WIDTH, v));
+
+ pdata.num_devices = 1;
+ pdata.devices = &m10_n3000_info;
+
+ dev_dbg(nn->dev, "%s cs %u bpm 0x%x mode 0x%x\n", __func__,
+ pdata.num_chipselect, pdata.bits_per_word_mask,
+ pdata.mode_bits);
+
+ pdevinfo.name = "subdev_spi_altera";
+ pdevinfo.id = PLATFORM_DEVID_AUTO;
+ pdevinfo.parent = nn->dev;
+ pdevinfo.data = &pdata;
+ pdevinfo.size_data = sizeof(pdata);
+
+ nn->altera_spi = platform_device_register_full(&pdevinfo);
+ return PTR_ERR_OR_ZERO(nn->altera_spi);
+}
+
+static void destroy_altera_spi_controller(struct n3000_nios *nn)
+{
+ platform_device_unregister(nn->altera_spi);
+}
+
+static int n3000_nios_poll_stat_timeout(void __iomem *base, u64 *v)
+{
+ int loops;
+
+	/*
+	 * We don't use a time-based timeout here, for performance.
+	 *
+	 * The regbus read/write is on the critical path of Intel PAC N3000
+	 * image programming, and time-based timeout checking would add too
+	 * much overhead. Usually the state changes within 1 or 2 loop
+	 * iterations on the test server; the loop count is set to 10000 for
+	 * safety.
+	 */
+	for (loops = N3000_NIOS_REGBUS_RETRY_COUNT; loops > 0; loops--) {
+ *v = readq(base + N3000_NS_STAT);
+ if (*v & N3000_NS_STAT_RW_VAL)
+ break;
+ cpu_relax();
+ }
+
+ return (loops > 0) ? 0 : -ETIMEDOUT;
+}
+
+static int n3000_nios_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct n3000_nios *nn = context;
+ u64 v;
+ int ret;
+
+ v = FIELD_PREP(N3000_NS_CTRL_CMD_MSK, N3000_NS_CTRL_CMD_WR) |
+ FIELD_PREP(N3000_NS_CTRL_ADDR, reg) |
+ FIELD_PREP(N3000_NS_CTRL_WR_DATA, val);
+ writeq(v, nn->base + N3000_NS_CTRL);
+
+ ret = n3000_nios_poll_stat_timeout(nn->base, &v);
+ if (ret)
+ dev_err(nn->dev, "fail to write reg 0x%x val 0x%x: %d\n",
+ reg, val, ret);
+
+ return ret;
+}
+
+static int n3000_nios_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct n3000_nios *nn = context;
+ u64 v;
+ int ret;
+
+ v = FIELD_PREP(N3000_NS_CTRL_CMD_MSK, N3000_NS_CTRL_CMD_RD) |
+ FIELD_PREP(N3000_NS_CTRL_ADDR, reg);
+ writeq(v, nn->base + N3000_NS_CTRL);
+
+ ret = n3000_nios_poll_stat_timeout(nn->base, &v);
+ if (ret)
+ dev_err(nn->dev, "fail to read reg 0x%x: %d\n", reg, ret);
+ else
+ *val = FIELD_GET(N3000_NS_STAT_RD_DATA, v);
+
+ return ret;
+}
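+
+/*
+ * Illustrative regbus transaction (not from the original patch): reading
+ * handshake register N3000_NIOS_INIT (0x1000) issues
+ *
+ *	CTRL = FIELD_PREP(N3000_NS_CTRL_CMD_MSK, N3000_NS_CTRL_CMD_RD) |
+ *	       FIELD_PREP(N3000_NS_CTRL_ADDR, 0x1000)
+ *	     = 0x4000100000000000
+ *
+ * then polls N3000_NS_STAT until N3000_NS_STAT_RW_VAL is set and takes the
+ * result from the RD_DATA field (bits [31:0]).
+ */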
+
+static const struct regmap_config n3000_nios_regbus_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+
+ .reg_write = n3000_nios_reg_write,
+ .reg_read = n3000_nios_reg_read,
+};
+
+static int n3000_nios_probe(struct dfl_device *ddev)
+{
+ struct device *dev = &ddev->dev;
+ struct n3000_nios *nn;
+ int ret;
+
+ nn = devm_kzalloc(dev, sizeof(*nn), GFP_KERNEL);
+ if (!nn)
+ return -ENOMEM;
+
+ dev_set_drvdata(&ddev->dev, nn);
+
+ nn->dev = dev;
+
+ nn->base = devm_ioremap_resource(&ddev->dev, &ddev->mmio_res);
+ if (IS_ERR(nn->base))
+ return PTR_ERR(nn->base);
+
+ nn->regmap = devm_regmap_init(dev, NULL, nn, &n3000_nios_regbus_cfg);
+ if (IS_ERR(nn->regmap))
+ return PTR_ERR(nn->regmap);
+
+ ret = n3000_nios_init_done_check(nn);
+ if (ret)
+ return ret;
+
+ ret = create_altera_spi_controller(nn);
+ if (ret)
+ dev_err(dev, "altera spi controller create failed: %d\n", ret);
+
+ return ret;
+}
+
+static void n3000_nios_remove(struct dfl_device *ddev)
+{
+ struct n3000_nios *nn = dev_get_drvdata(&ddev->dev);
+
+ destroy_altera_spi_controller(nn);
+}
+
+#define FME_FEATURE_ID_N3000_NIOS 0xd
+
+static const struct dfl_device_id n3000_nios_ids[] = {
+ { FME_ID, FME_FEATURE_ID_N3000_NIOS },
+ { }
+};
+MODULE_DEVICE_TABLE(dfl, n3000_nios_ids);
+
+static struct dfl_driver n3000_nios_driver = {
+ .drv = {
+ .name = "dfl-n3000-nios",
+ .dev_groups = n3000_nios_groups,
+ },
+ .id_table = n3000_nios_ids,
+ .probe = n3000_nios_probe,
+ .remove = n3000_nios_remove,
+};
+
+module_dfl_driver(n3000_nios_driver);
+
+MODULE_DESCRIPTION("Driver for Nios private feature on Intel PAC N3000");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
new file mode 100644
index 0000000000..98b8fd1618
--- /dev/null
+++ b/drivers/fpga/dfl-pci.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Device Feature List (DFL) PCIe device
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Zhang Yi <Yi.Z.Zhang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+
+#include "dfl.h"
+
+#define DRV_VERSION "0.8"
+#define DRV_NAME "dfl-pci"
+
+#define PCI_VSEC_ID_INTEL_DFLS 0x43
+
+#define PCI_VNDR_DFLS_CNT 0x8
+#define PCI_VNDR_DFLS_RES 0xc
+
+#define PCI_VNDR_DFLS_RES_BAR_MASK GENMASK(2, 0)
+#define PCI_VNDR_DFLS_RES_OFF_MASK GENMASK(31, 3)
+
+struct cci_drvdata {
+ struct dfl_fpga_cdev *cdev; /* container device */
+};
+
+static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
+{
+ if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
+ return NULL;
+
+ return pcim_iomap_table(pcidev)[0];
+}
+
+static int cci_pci_alloc_irq(struct pci_dev *pcidev)
+{
+ int ret, nvec = pci_msix_vec_count(pcidev);
+
+ if (nvec <= 0) {
+ dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
+ return 0;
+ }
+
+ ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
+ if (ret < 0)
+ return ret;
+
+ return nvec;
+}
+
+static void cci_pci_free_irq(struct pci_dev *pcidev)
+{
+ pci_free_irq_vectors(pcidev);
+}
+
+/* PCI Device ID */
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
+#define PCIE_DEVICE_ID_INTEL_PAC_D5005 0x0B2B
+#define PCIE_DEVICE_ID_SILICOM_PAC_N5010 0x1000
+#define PCIE_DEVICE_ID_SILICOM_PAC_N5011 0x1001
+#define PCIE_DEVICE_ID_INTEL_DFL 0xbcce
+/* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
+#define PCIE_SUBDEVICE_ID_INTEL_N6000 0x1770
+#define PCIE_SUBDEVICE_ID_INTEL_N6001 0x1771
+#define PCIE_SUBDEVICE_ID_INTEL_C6100 0x17d4
+
+/* VF Device */
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF 0x0B2C
+#define PCIE_DEVICE_ID_INTEL_DFL_VF 0xbccf
+
+static struct pci_device_id cci_pcie_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
+ {PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
+ {PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
+
+static int cci_init_drvdata(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ pci_set_drvdata(pcidev, drvdata);
+
+ return 0;
+}
+
+static void cci_remove_feature_devs(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+
+	/* remove all child feature devices */
+ dfl_fpga_feature_devs_remove(drvdata->cdev);
+ cci_pci_free_irq(pcidev);
+}
+
+static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
+{
+ unsigned int i;
+ int *table;
+
+ table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
+ if (!table)
+ return table;
+
+ for (i = 0; i < nvec; i++)
+ table[i] = pci_irq_vector(pcidev, i);
+
+ return table;
+}
+
+static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
+{
+ u32 bir, offset, dfl_cnt, dfl_res;
+ int dfl_res_off, i, bars, voff;
+ resource_size_t start, len;
+
+ voff = pci_find_vsec_capability(pcidev, PCI_VENDOR_ID_INTEL,
+ PCI_VSEC_ID_INTEL_DFLS);
+ if (!voff) {
+ dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
+ return -ENODEV;
+ }
+
+ dfl_cnt = 0;
+ pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt);
+ if (dfl_cnt > PCI_STD_NUM_BARS) {
+ dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
+ __func__, dfl_cnt, PCI_STD_NUM_BARS);
+ return -EINVAL;
+ }
+
+ dfl_res_off = voff + PCI_VNDR_DFLS_RES;
+ if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
+ dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
+ dfl_res = GENMASK(31, 0);
+ pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);
+
+ bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;
+ if (bir >= PCI_STD_NUM_BARS) {
+ dev_err(&pcidev->dev, "%s bad bir number %d\n",
+ __func__, bir);
+ return -EINVAL;
+ }
+
+ if (bars & BIT(bir)) {
+ dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
+ __func__, bir);
+ return -EINVAL;
+ }
+
+ bars |= BIT(bir);
+
+ len = pci_resource_len(pcidev, bir);
+ offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;
+ if (offset >= len) {
+ dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
+ __func__, offset, &len);
+ return -EINVAL;
+ }
+
+ dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);
+
+ len -= offset;
+
+ start = pci_resource_start(pcidev, bir) + offset;
+
+ dfl_fpga_enum_info_add_dfl(info, start, len);
+ }
+
+ return 0;
+}
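+
+/*
+ * Illustrative sketch (editorial note): each DFL entry in the VSEC is a
+ * single dword, decoded with the masks above; 0x12345 is a made-up value.
+ *
+ *	dfl_res = 0x12345;
+ *	bir     = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;	(= 5)
+ *	offset  = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;	(= 0x12340)
+ */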
+
+/* default method of finding DFLs, starting at offset 0 of BAR 0 */
+static int find_dfls_by_default(struct pci_dev *pcidev,
+ struct dfl_fpga_enum_info *info)
+{
+ int port_num, bar, i, ret = 0;
+ resource_size_t start, len;
+ void __iomem *base;
+ u32 offset;
+ u64 v;
+
+	/* start looking for Device Feature Lists from BAR 0 */
+ base = cci_pci_ioremap_bar0(pcidev);
+ if (!base)
+ return -ENOMEM;
+
+	/*
+	 * A PF device has an FME and Ports/AFUs, while a VF device only has
+	 * one Port/AFU. Check them and add the related "Device Feature List"
+	 * info for the next enumeration step.
+	 */
+ if (dfl_feature_is_fme(base)) {
+ start = pci_resource_start(pcidev, 0);
+ len = pci_resource_len(pcidev, 0);
+
+ dfl_fpga_enum_info_add_dfl(info, start, len);
+
+		/*
+		 * find more Device Feature Lists (e.g. Ports) based on the
+		 * information indicated by the FME module.
+		 */
+ v = readq(base + FME_HDR_CAP);
+ port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);
+
+ WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);
+
+ for (i = 0; i < port_num; i++) {
+ v = readq(base + FME_HDR_PORT_OFST(i));
+
+ /* skip ports which are not implemented. */
+ if (!(v & FME_PORT_OFST_IMP))
+ continue;
+
+			/*
+			 * add the Port's Device Feature List information for
+			 * the next enumeration step.
+			 */
+ bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
+ offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
+ if (bar == FME_PORT_OFST_BAR_SKIP) {
+ continue;
+ } else if (bar >= PCI_STD_NUM_BARS) {
+ dev_err(&pcidev->dev, "bad BAR %d for port %d\n",
+ bar, i);
+ ret = -EINVAL;
+ break;
+ }
+
+ start = pci_resource_start(pcidev, bar) + offset;
+ len = pci_resource_len(pcidev, bar) - offset;
+
+ dfl_fpga_enum_info_add_dfl(info, start, len);
+ }
+ } else if (dfl_feature_is_port(base)) {
+ start = pci_resource_start(pcidev, 0);
+ len = pci_resource_len(pcidev, 0);
+
+ dfl_fpga_enum_info_add_dfl(info, start, len);
+ } else {
+ ret = -ENODEV;
+ }
+
+ /* release I/O mappings for next step enumeration */
+ pcim_iounmap_regions(pcidev, BIT(0));
+
+ return ret;
+}
+
+/* enumerate feature devices under pci device */
+static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ struct dfl_fpga_enum_info *info;
+ struct dfl_fpga_cdev *cdev;
+ int nvec, ret = 0;
+ int *irq_table;
+
+ /* allocate enumeration info via pci_dev */
+ info = dfl_fpga_enum_info_alloc(&pcidev->dev);
+ if (!info)
+ return -ENOMEM;
+
+	/* add irq info for enumeration if the device supports irqs */
+ nvec = cci_pci_alloc_irq(pcidev);
+ if (nvec < 0) {
+ dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
+ ret = nvec;
+ goto enum_info_free_exit;
+ } else if (nvec) {
+ irq_table = cci_pci_create_irq_table(pcidev, nvec);
+ if (!irq_table) {
+ ret = -ENOMEM;
+ goto irq_free_exit;
+ }
+
+ ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
+ kfree(irq_table);
+ if (ret)
+ goto irq_free_exit;
+ }
+
+ ret = find_dfls_by_vsec(pcidev, info);
+ if (ret == -ENODEV)
+ ret = find_dfls_by_default(pcidev, info);
+
+ if (ret)
+ goto irq_free_exit;
+
+ /* start enumeration with prepared enumeration information */
+ cdev = dfl_fpga_feature_devs_enumerate(info);
+ if (IS_ERR(cdev)) {
+ dev_err(&pcidev->dev, "Enumeration failure\n");
+ ret = PTR_ERR(cdev);
+ goto irq_free_exit;
+ }
+
+ drvdata->cdev = cdev;
+
+irq_free_exit:
+ if (ret)
+ cci_pci_free_irq(pcidev);
+enum_info_free_exit:
+ dfl_fpga_enum_info_free(info);
+
+ return ret;
+}
+
+static
+int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
+{
+ int ret;
+
+ ret = pcim_enable_device(pcidev);
+ if (ret < 0) {
+ dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pcidev);
+
+ ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pcidev->dev, "No suitable DMA support available.\n");
+ return ret;
+ }
+
+ ret = cci_init_drvdata(pcidev);
+ if (ret) {
+ dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
+ return ret;
+ }
+
+ ret = cci_enumerate_feature_devs(pcidev);
+ if (ret) {
+ dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ struct dfl_fpga_cdev *cdev = drvdata->cdev;
+
+ if (!num_vfs) {
+		/*
+		 * disable SRIOV and then put the released ports back into
+		 * the default PF access mode.
+		 */
+ pci_disable_sriov(pcidev);
+
+ dfl_fpga_cdev_config_ports_pf(cdev);
+
+ } else {
+ int ret;
+
+		/*
+		 * Before enabling SRIOV, first put the released ports into
+		 * VF access mode.
+		 */
+ ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
+ if (ret)
+ return ret;
+
+ ret = pci_enable_sriov(pcidev, num_vfs);
+ if (ret) {
+ dfl_fpga_cdev_config_ports_pf(cdev);
+ return ret;
+ }
+ }
+
+ return num_vfs;
+}
+
+static void cci_pci_remove(struct pci_dev *pcidev)
+{
+ if (dev_is_pf(&pcidev->dev))
+ cci_pci_sriov_configure(pcidev, 0);
+
+ cci_remove_feature_devs(pcidev);
+}
+
+static struct pci_driver cci_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cci_pcie_id_tbl,
+ .probe = cci_pci_probe,
+ .remove = cci_pci_remove,
+ .sriov_configure = cci_pci_sriov_configure,
+};
+
+module_pci_driver(cci_pci_driver);
+
+MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
new file mode 100644
index 0000000000..dd7a783d53
--- /dev/null
+++ b/drivers/fpga/dfl.c
@@ -0,0 +1,2037 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Device Feature List (DFL) Support
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Zhang Yi <yi.z.zhang@intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+#include <linux/dfl.h>
+#include <linux/fpga-dfl.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/uaccess.h>
+
+#include "dfl.h"
+
+static DEFINE_MUTEX(dfl_id_mutex);
+
+/*
+ * When adding support for a new feature dev in the DFL framework, it is
+ * required to add a new item to enum dfl_id_type and provide related
+ * information in the dfl_devs table below, which is indexed by dfl_id_type,
+ * e.g. the name string used for platform device creation (define the name
+ * strings in dfl.h, as they can be reused by platform device drivers).
+ *
+ * If the new feature dev needs chardev support, it is also required to add
+ * a new item to the dfl_chrdevs table and configure dfl_devs[i].devt_type
+ * as an index into the dfl_chrdevs table. If no chardev support is needed,
+ * just set devt_type to an invalid index (DFL_FPGA_DEVT_MAX).
+ */
+enum dfl_fpga_devt_type {
+ DFL_FPGA_DEVT_FME,
+ DFL_FPGA_DEVT_PORT,
+ DFL_FPGA_DEVT_MAX,
+};
+
+static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];
+
+static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
+ "dfl-fme-pdata",
+ "dfl-port-pdata",
+};
+
+/**
+ * struct dfl_dev_info - dfl feature device information.
+ * @name: name string of the feature platform device.
+ * @dfh_id: id value in the Device Feature Header (DFH) register, per DFL spec.
+ * @id: idr id of the feature dev.
+ * @devt_type: index to dfl_chrdevs[].
+ */
+struct dfl_dev_info {
+ const char *name;
+ u16 dfh_id;
+ struct idr id;
+ enum dfl_fpga_devt_type devt_type;
+};
+
+/* it is indexed by dfl_id_type */
+static struct dfl_dev_info dfl_devs[] = {
+ {.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
+ .devt_type = DFL_FPGA_DEVT_FME},
+ {.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
+ .devt_type = DFL_FPGA_DEVT_PORT},
+};
+
+/**
+ * struct dfl_chardev_info - chardev information of dfl feature device
+ * @name: name string of the char device.
+ * @devt: devt of the char device.
+ */
+struct dfl_chardev_info {
+ const char *name;
+ dev_t devt;
+};
+
+/* indexed by enum dfl_fpga_devt_type */
+static struct dfl_chardev_info dfl_chrdevs[] = {
+ {.name = DFL_FPGA_FEATURE_DEV_FME},
+ {.name = DFL_FPGA_FEATURE_DEV_PORT},
+};
+
+static void dfl_ids_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ idr_init(&dfl_devs[i].id);
+}
+
+static void dfl_ids_destroy(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ idr_destroy(&dfl_devs[i].id);
+}
+
+static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
+{
+ int id;
+
+ WARN_ON(type >= DFL_ID_MAX);
+ mutex_lock(&dfl_id_mutex);
+ id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
+ mutex_unlock(&dfl_id_mutex);
+
+ return id;
+}
+
+static void dfl_id_free(enum dfl_id_type type, int id)
+{
+ WARN_ON(type >= DFL_ID_MAX);
+ mutex_lock(&dfl_id_mutex);
+ idr_remove(&dfl_devs[type].id, id);
+ mutex_unlock(&dfl_id_mutex);
+}
+
+static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ if (!strcmp(dfl_devs[i].name, pdev->name))
+ return i;
+
+ return DFL_ID_MAX;
+}
+
+static enum dfl_id_type dfh_id_to_type(u16 id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ if (dfl_devs[i].dfh_id == id)
+ return i;
+
+ return DFL_ID_MAX;
+}
+
+/*
+ * Introduce a global port_ops list; it allows port drivers to register their
+ * ops in this list, so that other feature devices (e.g. FME) can use the port
+ * functions even when the related port platform device is hidden. One example:
+ * in the virtualization case of a PCIe-based FPGA DFL device, when SRIOV is
+ * enabled, a port (and its AFU) is turned into a VF and the port platform
+ * device is hidden from the system, but it is still required to access the
+ * port to finish the FPGA reconfiguration function in FME.
+ */
+
+static DEFINE_MUTEX(dfl_port_ops_mutex);
+static LIST_HEAD(dfl_port_ops_list);
+
+/**
+ * dfl_fpga_port_ops_get - get matched port ops from the global list
+ * @pdev: platform device to match with associated port ops.
+ *
+ * Please note that you must call dfl_fpga_port_ops_put() after using the
+ * port_ops.
+ *
+ * Return: matched port ops on success, NULL otherwise.
+ */
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
+{
+ struct dfl_fpga_port_ops *ops = NULL;
+
+ mutex_lock(&dfl_port_ops_mutex);
+ if (list_empty(&dfl_port_ops_list))
+ goto done;
+
+ list_for_each_entry(ops, &dfl_port_ops_list, node) {
+ /* match port_ops using the name of platform device */
+ if (!strcmp(pdev->name, ops->name)) {
+ if (!try_module_get(ops->owner))
+ ops = NULL;
+ goto done;
+ }
+ }
+
+ ops = NULL;
+done:
+ mutex_unlock(&dfl_port_ops_mutex);
+ return ops;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
+
+/**
+ * dfl_fpga_port_ops_put - put port ops
+ * @ops: port ops.
+ */
+void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
+{
+ if (ops && ops->owner)
+ module_put(ops->owner);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
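+
+/*
+ * Illustrative sketch (editorial example): the expected get/use/put pattern
+ * for port_ops; get_id() is just one op a caller might use.
+ *
+ *	struct dfl_fpga_port_ops *ops;
+ *
+ *	ops = dfl_fpga_port_ops_get(pdev);
+ *	if (ops && ops->get_id)
+ *		port_id = ops->get_id(pdev);
+ *	dfl_fpga_port_ops_put(ops);
+ */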
+
+/**
+ * dfl_fpga_port_ops_add - add port_ops to global list
+ * @ops: port ops to add.
+ */
+void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
+{
+ mutex_lock(&dfl_port_ops_mutex);
+ list_add_tail(&ops->node, &dfl_port_ops_list);
+ mutex_unlock(&dfl_port_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
+
+/**
+ * dfl_fpga_port_ops_del - remove port_ops from global list
+ * @ops: port ops to del.
+ */
+void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
+{
+ mutex_lock(&dfl_port_ops_mutex);
+ list_del(&ops->node);
+ mutex_unlock(&dfl_port_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
+
+/**
+ * dfl_fpga_check_port_id - check the port id
+ * @pdev: port platform device.
+ * @pport_id: port id to compare.
+ *
+ * Return: 1 if the port device matches the given port id, otherwise 0.
+ */
+int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fpga_port_ops *port_ops;
+
+ if (pdata->id != FEATURE_DEV_ID_UNUSED)
+ return pdata->id == *(int *)pport_id;
+
+ port_ops = dfl_fpga_port_ops_get(pdev);
+ if (!port_ops || !port_ops->get_id)
+ return 0;
+
+ pdata->id = port_ops->get_id(pdev);
+ dfl_fpga_port_ops_put(port_ops);
+
+ return pdata->id == *(int *)pport_id;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
+
+static DEFINE_IDA(dfl_device_ida);
+
+static const struct dfl_device_id *
+dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev)
+{
+ if (id->type == ddev->type && id->feature_id == ddev->feature_id)
+ return id;
+
+ return NULL;
+}
+
+static int dfl_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct dfl_device *ddev = to_dfl_dev(dev);
+ struct dfl_driver *ddrv = to_dfl_drv(drv);
+ const struct dfl_device_id *id_entry;
+
+ id_entry = ddrv->id_table;
+ if (id_entry) {
+ while (id_entry->feature_id) {
+ if (dfl_match_one_device(id_entry, ddev)) {
+ ddev->id_entry = id_entry;
+ return 1;
+ }
+ id_entry++;
+ }
+ }
+
+ return 0;
+}
+
+static int dfl_bus_probe(struct device *dev)
+{
+ struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ return ddrv->probe(ddev);
+}
+
+static void dfl_bus_remove(struct device *dev)
+{
+ struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ if (ddrv->remove)
+ ddrv->remove(ddev);
+}
+
+static int dfl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct dfl_device *ddev = to_dfl_dev(dev);
+
+ return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X",
+ ddev->type, ddev->feature_id);
+}
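+
+/*
+ * For example (derived from the format string above, and assuming FME_ID is
+ * 0 per enum dfl_id_type in dfl.h), the N3000 Nios feature (feature id 0xd)
+ * reports "MODALIAS=dfl:t0000f000D"; module aliases generated from a
+ * dfl_device_id table are matched against this string.
+ */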
+
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ return sprintf(buf, "0x%x\n", ddev->type);
+}
+static DEVICE_ATTR_RO(type);
+
+static ssize_t
+feature_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ return sprintf(buf, "0x%x\n", ddev->feature_id);
+}
+static DEVICE_ATTR_RO(feature_id);
+
+static struct attribute *dfl_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_feature_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(dfl_dev);
+
+static struct bus_type dfl_bus_type = {
+ .name = "dfl",
+ .match = dfl_bus_match,
+ .probe = dfl_bus_probe,
+ .remove = dfl_bus_remove,
+ .uevent = dfl_bus_uevent,
+ .dev_groups = dfl_dev_groups,
+};
+
+static void release_dfl_dev(struct device *dev)
+{
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ if (ddev->mmio_res.parent)
+ release_resource(&ddev->mmio_res);
+
+ kfree(ddev->params);
+
+ ida_free(&dfl_device_ida, ddev->id);
+ kfree(ddev->irqs);
+ kfree(ddev);
+}
+
+static struct dfl_device *
+dfl_dev_add(struct dfl_feature_platform_data *pdata,
+ struct dfl_feature *feature)
+{
+ struct platform_device *pdev = pdata->dev;
+ struct resource *parent_res;
+ struct dfl_device *ddev;
+ int id, i, ret;
+
+ ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
+ if (!ddev)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_alloc(&dfl_device_ida, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(&pdev->dev, "unable to get id\n");
+ kfree(ddev);
+ return ERR_PTR(id);
+ }
+
+	/* after device_initialize(), resources are freed via put_device() */
+ device_initialize(&ddev->dev);
+ ddev->dev.parent = &pdev->dev;
+ ddev->dev.bus = &dfl_bus_type;
+ ddev->dev.release = release_dfl_dev;
+ ddev->id = id;
+ ret = dev_set_name(&ddev->dev, "dfl_dev.%d", id);
+ if (ret)
+ goto put_dev;
+
+ ddev->type = feature_dev_id_type(pdev);
+ ddev->feature_id = feature->id;
+ ddev->revision = feature->revision;
+ ddev->dfh_version = feature->dfh_version;
+ ddev->cdev = pdata->dfl_cdev;
+ if (feature->param_size) {
+ ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL);
+ if (!ddev->params) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+ ddev->param_size = feature->param_size;
+ }
+
+ /* add mmio resource */
+ parent_res = &pdev->resource[feature->resource_index];
+ ddev->mmio_res.flags = IORESOURCE_MEM;
+ ddev->mmio_res.start = parent_res->start;
+ ddev->mmio_res.end = parent_res->end;
+ ddev->mmio_res.name = dev_name(&ddev->dev);
+ ret = insert_resource(parent_res, &ddev->mmio_res);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed to claim resource: %pR\n",
+ dev_name(&ddev->dev), &ddev->mmio_res);
+ goto put_dev;
+ }
+
+ /* then add irq resource */
+ if (feature->nr_irqs) {
+ ddev->irqs = kcalloc(feature->nr_irqs,
+ sizeof(*ddev->irqs), GFP_KERNEL);
+ if (!ddev->irqs) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ for (i = 0; i < feature->nr_irqs; i++)
+ ddev->irqs[i] = feature->irq_ctx[i].irq;
+
+ ddev->num_irqs = feature->nr_irqs;
+ }
+
+ ret = device_add(&ddev->dev);
+ if (ret)
+ goto put_dev;
+
+ dev_dbg(&pdev->dev, "add dfl_dev: %s\n", dev_name(&ddev->dev));
+ return ddev;
+
+put_dev:
+	/* calls release_dfl_dev(), which does the cleanup */
+ put_device(&ddev->dev);
+ return ERR_PTR(ret);
+}
+
+static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_feature *feature;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature) {
+ if (feature->ddev) {
+ device_unregister(&feature->ddev->dev);
+ feature->ddev = NULL;
+ }
+ }
+}
+
+static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_feature *feature;
+ struct dfl_device *ddev;
+ int ret;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature) {
+ if (feature->ioaddr)
+ continue;
+
+ if (feature->ddev) {
+ ret = -EEXIST;
+ goto err;
+ }
+
+ ddev = dfl_dev_add(pdata, feature);
+ if (IS_ERR(ddev)) {
+ ret = PTR_ERR(ddev);
+ goto err;
+ }
+
+ feature->ddev = ddev;
+ }
+
+ return 0;
+
+err:
+ dfl_devs_remove(pdata);
+ return ret;
+}
+
+int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner)
+{
+ if (!dfl_drv || !dfl_drv->probe || !dfl_drv->id_table)
+ return -EINVAL;
+
+ dfl_drv->drv.owner = owner;
+ dfl_drv->drv.bus = &dfl_bus_type;
+
+ return driver_register(&dfl_drv->drv);
+}
+EXPORT_SYMBOL(__dfl_driver_register);
+
+void dfl_driver_unregister(struct dfl_driver *dfl_drv)
+{
+ driver_unregister(&dfl_drv->drv);
+}
+EXPORT_SYMBOL(dfl_driver_unregister);
+
+#define is_header_feature(feature) ((feature)->id == FEATURE_ID_FIU_HEADER)
+
+/**
+ * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
+ * @pdev: feature device.
+ */
+void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature *feature;
+
+ dfl_devs_remove(pdata);
+
+ dfl_fpga_dev_for_each_feature(pdata, feature) {
+ if (feature->ops) {
+ if (feature->ops->uinit)
+ feature->ops->uinit(pdev, feature);
+ feature->ops = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
+
+static int dfl_feature_instance_init(struct platform_device *pdev,
+ struct dfl_feature_platform_data *pdata,
+ struct dfl_feature *feature,
+ struct dfl_feature_driver *drv)
+{
+ void __iomem *base;
+ int ret = 0;
+
+ if (!is_header_feature(feature)) {
+ base = devm_platform_ioremap_resource(pdev,
+ feature->resource_index);
+ if (IS_ERR(base)) {
+ dev_err(&pdev->dev,
+ "ioremap failed for feature 0x%x!\n",
+ feature->id);
+ return PTR_ERR(base);
+ }
+
+ feature->ioaddr = base;
+ }
+
+ if (drv->ops->init) {
+ ret = drv->ops->init(pdev, feature);
+ if (ret)
+ return ret;
+ }
+
+ feature->ops = drv->ops;
+
+ return ret;
+}
+
+static bool dfl_feature_drv_match(struct dfl_feature *feature,
+ struct dfl_feature_driver *driver)
+{
+ const struct dfl_feature_id *ids = driver->id_table;
+
+ if (ids) {
+ while (ids->id) {
+ if (ids->id == feature->id)
+ return true;
+ ids++;
+ }
+ }
+ return false;
+}
+
+/**
+ * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
+ * @pdev: feature device.
+ * @feature_drvs: drvs for sub features.
+ *
+ * This function matches sub features against the given feature drvs list
+ * and uses the matched drv to init the related sub feature.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_dev_feature_init(struct platform_device *pdev,
+ struct dfl_feature_driver *feature_drvs)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_driver *drv = feature_drvs;
+ struct dfl_feature *feature;
+ int ret;
+
+ while (drv->ops) {
+ dfl_fpga_dev_for_each_feature(pdata, feature) {
+ if (dfl_feature_drv_match(feature, drv)) {
+ ret = dfl_feature_instance_init(pdev, pdata,
+ feature, drv);
+ if (ret)
+ goto exit;
+ }
+ }
+ drv++;
+ }
+
+ ret = dfl_devs_add(pdata);
+ if (ret)
+ goto exit;
+
+ return 0;
+exit:
+ dfl_fpga_dev_feature_uinit(pdev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
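+
+/*
+ * Illustrative sketch (editorial example; the names are made up): a feature
+ * dev driver passes a dfl_feature_driver array terminated by an entry with
+ * NULL ops, since the loop above stops at the first entry without ops.
+ *
+ *	static struct dfl_feature_driver my_feature_drvs[] = {
+ *		{ .id_table = my_hdr_id_table, .ops = &my_hdr_ops },
+ *		{ .ops = NULL },
+ *	};
+ *
+ *	ret = dfl_fpga_dev_feature_init(pdev, my_feature_drvs);
+ */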
+
+static void dfl_chardev_uinit(void)
+{
+ int i;
+
+ for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
+ if (MAJOR(dfl_chrdevs[i].devt)) {
+ unregister_chrdev_region(dfl_chrdevs[i].devt,
+ MINORMASK + 1);
+ dfl_chrdevs[i].devt = MKDEV(0, 0);
+ }
+}
+
+static int dfl_chardev_init(void)
+{
+ int i, ret;
+
+ for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
+ ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
+ MINORMASK + 1, dfl_chrdevs[i].name);
+ if (ret)
+ goto exit;
+ }
+
+ return 0;
+
+exit:
+ dfl_chardev_uinit();
+ return ret;
+}
+
+static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
+{
+ if (type >= DFL_FPGA_DEVT_MAX)
+ return 0;
+
+ return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
+}
+
+/**
+ * dfl_fpga_dev_ops_register - register cdev ops for feature dev
+ *
+ * @pdev: feature dev.
+ * @fops: file operations for feature dev's cdev.
+ * @owner: owning module/driver.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_dev_ops_register(struct platform_device *pdev,
+ const struct file_operations *fops,
+ struct module *owner)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ cdev_init(&pdata->cdev, fops);
+ pdata->cdev.owner = owner;
+
+	/*
+	 * Set the parent to the feature device so that its refcount is
+	 * decreased after the last reference to the cdev is gone; this
+	 * makes sure the feature device stays valid during the device
+	 * file's life-cycle.
+	 */
+ pdata->cdev.kobj.parent = &pdev->dev.kobj;
+
+ return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
+
+/**
+ * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
+ * @pdev: feature dev.
+ */
+void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ cdev_del(&pdata->cdev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
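+
+/*
+ * Illustrative sketch (editorial example; my_fops is a made-up name): a
+ * feature dev driver typically registers its file operations from its init
+ * path and unregisters them on the uinit path.
+ *
+ *	ret = dfl_fpga_dev_ops_register(pdev, &my_fops, THIS_MODULE);
+ *	...
+ *	dfl_fpga_dev_ops_unregister(pdev);
+ */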
+
+/**
+ * struct build_feature_devs_info - info collected during feature dev build.
+ *
+ * @dev: device to enumerate.
+ * @cdev: the container device for all feature devices.
+ * @nr_irqs: number of irqs for all feature devices.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
+ * this device.
+ * @feature_dev: current feature device.
+ * @ioaddr: header register region address of current FIU in enumeration.
+ * @start: register resource start of current FIU.
+ * @len: max register resource length of current FIU.
+ * @sub_features: a sub features linked list for feature device in enumeration.
+ * @feature_num: number of sub features for feature device in enumeration.
+ */
+struct build_feature_devs_info {
+ struct device *dev;
+ struct dfl_fpga_cdev *cdev;
+ unsigned int nr_irqs;
+ int *irq_table;
+
+ struct platform_device *feature_dev;
+ void __iomem *ioaddr;
+ resource_size_t start;
+ resource_size_t len;
+ struct list_head sub_features;
+ int feature_num;
+};
+
+/**
+ * struct dfl_feature_info - sub feature info collected during feature dev build
+ *
+ * @fid: id of this sub feature.
+ * @revision: revision of this sub feature
+ * @dfh_version: version of Device Feature Header (DFH)
+ * @mmio_res: mmio resource of this sub feature.
+ * @ioaddr: mapped base address of mmio resource.
+ * @node: node in sub_features linked list.
+ * @irq_base: start of irq index in this sub feature.
+ * @nr_irqs: number of irqs of this sub feature.
+ * @param_size: size of the DFH parameters in bytes.
+ * @params: DFH parameter data.
+ */
+struct dfl_feature_info {
+ u16 fid;
+ u8 revision;
+ u8 dfh_version;
+ struct resource mmio_res;
+ void __iomem *ioaddr;
+ struct list_head node;
+ unsigned int irq_base;
+ unsigned int nr_irqs;
+ unsigned int param_size;
+ u64 params[];
+};
+
+static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
+ struct platform_device *port)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
+
+ mutex_lock(&cdev->lock);
+ list_add(&pdata->node, &cdev->port_dev_list);
+ get_device(&pdata->dev->dev);
+ mutex_unlock(&cdev->lock);
+}
+
+/*
+ * Register the current feature device. It is called when we need to switch
+ * to parsing another feature, or when we have parsed all features on the
+ * given device feature list.
+ */
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct platform_device *fdev = binfo->feature_dev;
+ struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_info *finfo, *p;
+ enum dfl_id_type type;
+ int ret, index = 0, res_idx = 0;
+
+ type = feature_dev_id_type(fdev);
+ if (WARN_ON_ONCE(type >= DFL_ID_MAX))
+ return -EINVAL;
+
+	/*
+	 * We do not need to care about the memory associated with the
+	 * platform device. After calling platform_device_unregister(),
+	 * it will be automatically freed by the device's release()
+	 * callback, platform_device_release().
+	 */
+ pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->dev = fdev;
+ pdata->num = binfo->feature_num;
+ pdata->dfl_cdev = binfo->cdev;
+ pdata->id = FEATURE_DEV_ID_UNUSED;
+ mutex_init(&pdata->lock);
+ lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
+ dfl_pdata_key_strings[type]);
+
+	/*
+	 * The count should be initialized to 0 to make sure
+	 * __fpga_port_enable() following __fpga_port_disable()
+	 * works properly for a port device, and it should always
+	 * be 0 for an fme device.
+	 */
+ WARN_ON(pdata->disable_count);
+
+ fdev->dev.platform_data = pdata;
+
+ /* each sub feature has one MMIO resource */
+ fdev->num_resources = binfo->feature_num;
+ fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
+ GFP_KERNEL);
+ if (!fdev->resource)
+ return -ENOMEM;
+
+ /* fill features and resource information for feature dev */
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ struct dfl_feature *feature = &pdata->features[index++];
+ struct dfl_feature_irq_ctx *ctx;
+ unsigned int i;
+
+ /* save resource information for each feature */
+ feature->dev = fdev;
+ feature->id = finfo->fid;
+ feature->revision = finfo->revision;
+ feature->dfh_version = finfo->dfh_version;
+
+ if (finfo->param_size) {
+ feature->params = devm_kmemdup(binfo->dev,
+ finfo->params, finfo->param_size,
+ GFP_KERNEL);
+ if (!feature->params)
+ return -ENOMEM;
+
+ feature->param_size = finfo->param_size;
+ }
+		/*
+		 * The FIU header feature has some fundamental functions
+		 * (sriov set, port enable/disable) needed by the dfl bus
+		 * device and other sub features, so its mmio resource
+		 * should be mapped by the DFL bus device and not assigned
+		 * to the feature devices (dfl-fme/afu) again.
+		 */
+ if (is_header_feature(feature)) {
+ feature->resource_index = -1;
+ feature->ioaddr =
+ devm_ioremap_resource(binfo->dev,
+ &finfo->mmio_res);
+ if (IS_ERR(feature->ioaddr))
+ return PTR_ERR(feature->ioaddr);
+ } else {
+ feature->resource_index = res_idx;
+ fdev->resource[res_idx++] = finfo->mmio_res;
+ }
+
+ if (finfo->nr_irqs) {
+ ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
+ sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < finfo->nr_irqs; i++)
+ ctx[i].irq =
+ binfo->irq_table[finfo->irq_base + i];
+
+ feature->irq_ctx = ctx;
+ feature->nr_irqs = finfo->nr_irqs;
+ }
+
+ list_del(&finfo->node);
+ kfree(finfo);
+ }
+
+ ret = platform_device_add(binfo->feature_dev);
+ if (!ret) {
+ if (type == PORT_ID)
+ dfl_fpga_cdev_add_port_dev(binfo->cdev,
+ binfo->feature_dev);
+ else
+ binfo->cdev->fme_dev =
+ get_device(&binfo->feature_dev->dev);
+ /*
+ * reset it to avoid build_info_free() freeing their resource.
+ *
+ * The resource of successfully registered feature devices
+ * will be freed by platform_device_unregister(). See the
+ * comments in build_info_create_dev().
+ */
+ binfo->feature_dev = NULL;
+ }
+
+ return ret;
+}
+
+static int
+build_info_create_dev(struct build_feature_devs_info *binfo,
+ enum dfl_id_type type)
+{
+ struct platform_device *fdev;
+
+ if (type >= DFL_ID_MAX)
+ return -EINVAL;
+
+	/*
+	 * We use -ENODEV as the initialization indicator, which shows
+	 * whether the id needs to be reclaimed.
+	 */
+ fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
+ if (!fdev)
+ return -ENOMEM;
+
+ binfo->feature_dev = fdev;
+ binfo->feature_num = 0;
+
+ INIT_LIST_HEAD(&binfo->sub_features);
+
+ fdev->id = dfl_id_alloc(type, &fdev->dev);
+ if (fdev->id < 0)
+ return fdev->id;
+
+ fdev->dev.parent = &binfo->cdev->region->dev;
+ fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
+
+ return 0;
+}
+
+static void build_info_free(struct build_feature_devs_info *binfo)
+{
+ struct dfl_feature_info *finfo, *p;
+
+	/*
+	 * Free the id if it is valid (>= 0). See the comments in
+	 * build_info_create_dev().
+	 */
+ if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
+ dfl_id_free(feature_dev_id_type(binfo->feature_dev),
+ binfo->feature_dev->id);
+
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ list_del(&finfo->node);
+ kfree(finfo);
+ }
+ }
+
+ platform_device_put(binfo->feature_dev);
+
+ devm_kfree(binfo->dev, binfo);
+}
+
+static inline u32 feature_size(u64 value)
+{
+ u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, value);
+ /* workaround for private features with invalid size, use 4K instead */
+ return ofst ? ofst : 4096;
+}
+
+static u16 feature_id(u64 value)
+{
+ u16 id = FIELD_GET(DFH_ID, value);
+ u8 type = FIELD_GET(DFH_TYPE, value);
+
+ if (type == DFH_TYPE_FIU)
+ return FEATURE_ID_FIU_HEADER;
+ else if (type == DFH_TYPE_PRIVATE)
+ return id;
+ else if (type == DFH_TYPE_AFU)
+ return FEATURE_ID_AFU;
+
+ WARN_ON(1);
+ return 0;
+}
+
+static u64 *find_param(u64 *params, resource_size_t max, int param_id)
+{
+ u64 *end = params + max / sizeof(u64);
+ u64 v, next;
+
+ while (params < end) {
+ v = *params;
+ if (param_id == FIELD_GET(DFHv1_PARAM_HDR_ID, v))
+ return params;
+
+ if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
+ break;
+
+ next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
+ params += next;
+ }
+
+ return NULL;
+}
+
+/**
+ * dfh_find_param() - find parameter block for the given parameter id
+ * @dfl_dev: dfl device
+ * @param_id: id of dfl parameter
+ * @psize: destination to store size of parameter data in bytes
+ *
+ * Return: pointer to the start of parameter data, or an ERR_PTR() encoded
+ * error otherwise.
+ */
+void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *psize)
+{
+ u64 *phdr = find_param(dfl_dev->params, dfl_dev->param_size, param_id);
+
+ if (!phdr)
+ return ERR_PTR(-ENOENT);
+
+ if (psize)
+ *psize = (FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, *phdr) - 1) * sizeof(u64);
+
+ return phdr + 1;
+}
+EXPORT_SYMBOL_GPL(dfh_find_param);
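+
+/*
+ * Illustrative sketch (editorial example): how a dfl device driver might
+ * look up the MSI-X parameter block of a DFHv1 feature; the same parameter
+ * id is used by parse_feature_irqs() below.
+ *
+ *	size_t psize;
+ *	u64 *p;
+ *
+ *	p = dfh_find_param(dfl_dev, DFHv1_PARAM_ID_MSI_X, &psize);
+ *	if (!IS_ERR(p))
+ *		dev_dbg(&dfl_dev->dev, "MSI-X param: %zu bytes\n", psize);
+ */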
+
+static int parse_feature_irqs(struct build_feature_devs_info *binfo,
+ resource_size_t ofst, struct dfl_feature_info *finfo)
+{
+ void __iomem *base = binfo->ioaddr + ofst;
+ unsigned int i, ibase, inr = 0;
+ void *params = finfo->params;
+ enum dfl_id_type type;
+ u16 fid = finfo->fid;
+ int virq;
+ u64 *p;
+ u64 v;
+
+ switch (finfo->dfh_version) {
+ case 0:
+ /*
+ * DFHv0 only provides MMIO resource information for each feature
+ * in the DFL header. There is no generic interrupt information.
+ * Instead, features with interrupt functionality provide
+ * the information in feature specific registers.
+ */
+ type = feature_dev_id_type(binfo->feature_dev);
+ if (type == PORT_ID) {
+ switch (fid) {
+ case PORT_FEATURE_ID_UINT:
+ v = readq(base + PORT_UINT_CAP);
+ ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
+ inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
+ break;
+ case PORT_FEATURE_ID_ERROR:
+ v = readq(base + PORT_ERROR_CAP);
+ ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+ } else if (type == FME_ID) {
+ switch (fid) {
+ case FME_FEATURE_ID_GLOBAL_ERR:
+ v = readq(base + FME_ERROR_CAP);
+ ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+ }
+ break;
+
+ case 1:
+ /*
+ * DFHv1 provides interrupt resource information in DFHv1
+ * parameter blocks.
+ */
+ p = find_param(params, finfo->param_size, DFHv1_PARAM_ID_MSI_X);
+ if (!p)
+ break;
+
+ p++;
+ ibase = FIELD_GET(DFHv1_PARAM_MSI_X_STARTV, *p);
+ inr = FIELD_GET(DFHv1_PARAM_MSI_X_NUMV, *p);
+ break;
+
+ default:
+ dev_warn(binfo->dev, "unexpected DFH version %d\n", finfo->dfh_version);
+ break;
+ }
+
+ if (!inr) {
+ finfo->irq_base = 0;
+ finfo->nr_irqs = 0;
+ return 0;
+ }
+
+ dev_dbg(binfo->dev, "feature: 0x%x, irq_base: %u, nr_irqs: %u\n",
+ fid, ibase, inr);
+
+ if (ibase + inr > binfo->nr_irqs) {
+ dev_err(binfo->dev,
+ "Invalid interrupt number in feature 0x%x\n", fid);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < inr; i++) {
+ virq = binfo->irq_table[ibase + i];
+ if (virq < 0 || virq > NR_IRQS) {
+ dev_err(binfo->dev,
+ "Invalid irq table entry for feature 0x%x\n",
+ fid);
+ return -EINVAL;
+ }
+ }
+
+ finfo->irq_base = ibase;
+ finfo->nr_irqs = inr;
+
+ return 0;
+}
+
+static int dfh_get_param_size(void __iomem *dfh_base, resource_size_t max)
+{
+ int size = 0;
+ u64 v, next;
+
+ if (!FIELD_GET(DFHv1_CSR_SIZE_GRP_HAS_PARAMS,
+ readq(dfh_base + DFHv1_CSR_SIZE_GRP)))
+ return 0;
+
+ while (size + DFHv1_PARAM_HDR < max) {
+ v = readq(dfh_base + DFHv1_PARAM_HDR + size);
+
+ next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
+ if (!next)
+ return -EINVAL;
+
+ size += next * sizeof(u64);
+
+ if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
+ return size;
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * When creating sub feature instances, private features do not need the
+ * resource size and feature id provided as inputs, as they can be read from
+ * the DFH register. For an afu sub feature, the register region only
+ * contains user-defined registers, so never trust any information from it;
+ * just use the resource size information provided by its parent FIU.
+ */
+static int
+create_feature_instance(struct build_feature_devs_info *binfo,
+ resource_size_t ofst, resource_size_t size, u16 fid)
+{
+ struct dfl_feature_info *finfo;
+ resource_size_t start, end;
+ int dfh_psize = 0;
+ u8 revision = 0;
+ u64 v, addr_off;
+ u8 dfh_ver = 0;
+ int ret;
+
+ if (fid != FEATURE_ID_AFU) {
+ v = readq(binfo->ioaddr + ofst);
+ revision = FIELD_GET(DFH_REVISION, v);
+ dfh_ver = FIELD_GET(DFH_VERSION, v);
+ /* read feature size and id if inputs are invalid */
+ size = size ? size : feature_size(v);
+ fid = fid ? fid : feature_id(v);
+ if (dfh_ver == 1) {
+ dfh_psize = dfh_get_param_size(binfo->ioaddr + ofst, size);
+ if (dfh_psize < 0) {
+ dev_err(binfo->dev,
+ "failed to read size of DFHv1 parameters %d\n",
+ dfh_psize);
+ return dfh_psize;
+ }
+ dev_dbg(binfo->dev, "dfhv1_psize %d\n", dfh_psize);
+ }
+ }
+
+ if (binfo->len - ofst < size)
+ return -EINVAL;
+
+ finfo = kzalloc(struct_size(finfo, params, dfh_psize / sizeof(u64)), GFP_KERNEL);
+ if (!finfo)
+ return -ENOMEM;
+
+ memcpy_fromio(finfo->params, binfo->ioaddr + ofst + DFHv1_PARAM_HDR, dfh_psize);
+ finfo->param_size = dfh_psize;
+
+ finfo->fid = fid;
+ finfo->revision = revision;
+ finfo->dfh_version = dfh_ver;
+ if (dfh_ver == 1) {
+ v = readq(binfo->ioaddr + ofst + DFHv1_CSR_ADDR);
+ addr_off = FIELD_GET(DFHv1_CSR_ADDR_MASK, v);
+ if (FIELD_GET(DFHv1_CSR_ADDR_REL, v))
+ start = addr_off << 1;
+ else
+ start = binfo->start + ofst + addr_off;
+
+ v = readq(binfo->ioaddr + ofst + DFHv1_CSR_SIZE_GRP);
+ end = start + FIELD_GET(DFHv1_CSR_SIZE_GRP_SIZE, v) - 1;
+ } else {
+ start = binfo->start + ofst;
+ end = start + size - 1;
+ }
+ finfo->mmio_res.flags = IORESOURCE_MEM;
+ finfo->mmio_res.start = start;
+ finfo->mmio_res.end = end;
+
+ ret = parse_feature_irqs(binfo, ofst, finfo);
+ if (ret) {
+ kfree(finfo);
+ return ret;
+ }
+
+ list_add_tail(&finfo->node, &binfo->sub_features);
+ binfo->feature_num++;
+
+ return 0;
+}
+
+static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
+ resource_size_t ofst)
+{
+ u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
+ u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
+
+ WARN_ON(!size);
+
+ return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
+}
+
+#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
+
+static int parse_feature_afu(struct build_feature_devs_info *binfo,
+ resource_size_t ofst)
+{
+ if (!is_feature_dev_detected(binfo)) {
+ dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
+ return -EINVAL;
+ }
+
+ switch (feature_dev_id_type(binfo->feature_dev)) {
+ case PORT_ID:
+ return parse_feature_port_afu(binfo, ofst);
+ default:
+ dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
+ binfo->feature_dev->name);
+ }
+
+ return 0;
+}
+
+static int build_info_prepare(struct build_feature_devs_info *binfo,
+ resource_size_t start, resource_size_t len)
+{
+ struct device *dev = binfo->dev;
+ void __iomem *ioaddr;
+
+ if (!devm_request_mem_region(dev, start, len, dev_name(dev))) {
+ dev_err(dev, "request region fail, start:%pa, len:%pa\n",
+ &start, &len);
+ return -EBUSY;
+ }
+
+ ioaddr = devm_ioremap(dev, start, len);
+ if (!ioaddr) {
+ dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n",
+ &start, &len);
+ return -ENOMEM;
+ }
+
+ binfo->start = start;
+ binfo->len = len;
+ binfo->ioaddr = ioaddr;
+
+ return 0;
+}
+
+static void build_info_complete(struct build_feature_devs_info *binfo)
+{
+ devm_iounmap(binfo->dev, binfo->ioaddr);
+ devm_release_mem_region(binfo->dev, binfo->start, binfo->len);
+}
+
+static int parse_feature_fiu(struct build_feature_devs_info *binfo,
+ resource_size_t ofst)
+{
+ int ret = 0;
+ u32 offset;
+ u16 id;
+ u64 v;
+
+ if (is_feature_dev_detected(binfo)) {
+ build_info_complete(binfo);
+
+ ret = build_info_commit_dev(binfo);
+ if (ret)
+ return ret;
+
+ ret = build_info_prepare(binfo, binfo->start + ofst,
+ binfo->len - ofst);
+ if (ret)
+ return ret;
+ }
+
+ v = readq(binfo->ioaddr + DFH);
+ id = FIELD_GET(DFH_ID, v);
+
+ /* create platform device for dfl feature dev */
+ ret = build_info_create_dev(binfo, dfh_id_to_type(id));
+ if (ret)
+ return ret;
+
+ ret = create_feature_instance(binfo, 0, 0, 0);
+ if (ret)
+ return ret;
+	/*
+	 * Find and parse the FIU's child AFU via its NEXT_AFU register.
+	 * Please note that only a Port has a valid NEXT_AFU pointer per spec.
+	 */
+ v = readq(binfo->ioaddr + NEXT_AFU);
+
+ offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
+ if (offset)
+ return parse_feature_afu(binfo, offset);
+
+ dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);
+
+ return ret;
+}
+
+static int parse_feature_private(struct build_feature_devs_info *binfo,
+ resource_size_t ofst)
+{
+ if (!is_feature_dev_detected(binfo)) {
+ dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n",
+ feature_id(readq(binfo->ioaddr + ofst)));
+ return -EINVAL;
+ }
+
+ return create_feature_instance(binfo, ofst, 0, 0);
+}
+
+/**
+ * parse_feature - parse a feature on given device feature list
+ *
+ * @binfo: build feature devices information.
+ * @ofst: offset to current FIU header
+ */
+static int parse_feature(struct build_feature_devs_info *binfo,
+ resource_size_t ofst)
+{
+ u64 v;
+ u32 type;
+
+ v = readq(binfo->ioaddr + ofst + DFH);
+ type = FIELD_GET(DFH_TYPE, v);
+
+ switch (type) {
+ case DFH_TYPE_AFU:
+ return parse_feature_afu(binfo, ofst);
+ case DFH_TYPE_PRIVATE:
+ return parse_feature_private(binfo, ofst);
+ case DFH_TYPE_FIU:
+ return parse_feature_fiu(binfo, ofst);
+ default:
+ dev_info(binfo->dev,
+ "Feature Type %x is not supported.\n", type);
+ }
+
+ return 0;
+}
+
+static int parse_feature_list(struct build_feature_devs_info *binfo,
+ resource_size_t start, resource_size_t len)
+{
+ resource_size_t end = start + len;
+ int ret = 0;
+ u32 ofst = 0;
+ u64 v;
+
+ ret = build_info_prepare(binfo, start, len);
+ if (ret)
+ return ret;
+
+ /* walk through the device feature list via DFH's next DFH pointer. */
+ for (; start < end; start += ofst) {
+ if (end - start < DFH_SIZE) {
+ dev_err(binfo->dev, "The region is too small to contain a feature.\n");
+ return -EINVAL;
+ }
+
+ ret = parse_feature(binfo, start - binfo->start);
+ if (ret)
+ return ret;
+
+ v = readq(binfo->ioaddr + start - binfo->start + DFH);
+ ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
+
+		/* stop parsing if EOL (End of List) is set or offset is 0 */
+ if ((v & DFH_EOL) || !ofst)
+ break;
+ }
+
+	/* commit the current feature device when reaching the end of the list */
+ build_info_complete(binfo);
+
+ if (is_feature_dev_detected(binfo))
+ ret = build_info_commit_dev(binfo);
+
+ return ret;
+}
+
+struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
+{
+ struct dfl_fpga_enum_info *info;
+
+ get_device(dev);
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ put_device(dev);
+ return NULL;
+ }
+
+ info->dev = dev;
+ INIT_LIST_HEAD(&info->dfls);
+
+ return info;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
+
+void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
+{
+ struct dfl_fpga_enum_dfl *tmp, *dfl;
+ struct device *dev;
+
+ if (!info)
+ return;
+
+ dev = info->dev;
+
+	/* remove all device feature list entries. */
+ list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
+ list_del(&dfl->node);
+ devm_kfree(dev, dfl);
+ }
+
+ /* remove irq table */
+ if (info->irq_table)
+ devm_kfree(dev, info->irq_table);
+
+ devm_kfree(dev, info);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
+
+/**
+ * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
+ *
+ * @info: ptr to dfl_fpga_enum_info
+ * @start: mmio resource address of the device feature list.
+ * @len: mmio resource length of the device feature list.
+ *
+ * One FPGA device may have one or more Device Feature Lists (DFLs); use this
+ * function to add the information of each DFL to a common data structure for
+ * the next enumeration step.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
+ resource_size_t start, resource_size_t len)
+{
+ struct dfl_fpga_enum_dfl *dfl;
+
+ dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
+ if (!dfl)
+ return -ENOMEM;
+
+ dfl->start = start;
+ dfl->len = len;
+
+ list_add_tail(&dfl->node, &info->dfls);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
+
+/**
+ * dfl_fpga_enum_info_add_irq - add irq table to enum info
+ *
+ * @info: ptr to dfl_fpga_enum_info
+ * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
+ * this device.
+ *
+ * One FPGA device may have several interrupts. This function adds the irq
+ * information of the DFL fpga device to the enum info for the next
+ * enumeration step. It should be called before
+ * dfl_fpga_feature_devs_enumerate(). As we only support one irq domain for
+ * all DFLs in the same enum info, adding an irq table a second time for the
+ * same enum info will return an error.
+ *
+ * If we need to enumerate DFLs which belong to different irq domains, we
+ * should fill more enum info and enumerate them one by one.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
+ unsigned int nr_irqs, int *irq_table)
+{
+ if (!nr_irqs || !irq_table)
+ return -EINVAL;
+
+ if (info->irq_table)
+ return -EEXIST;
+
+ info->irq_table = devm_kmemdup(info->dev, irq_table,
+ sizeof(int) * nr_irqs, GFP_KERNEL);
+ if (!info->irq_table)
+ return -ENOMEM;
+
+ info->nr_irqs = nr_irqs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
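+
+/*
+ * Illustrative sketch (editorial example) of the enumeration sequence a bus
+ * driver follows (cf. cci_enumerate_feature_devs() in dfl-pci.c); start,
+ * len, nr_irqs and irq_table come from bus-specific probing.
+ *
+ *	info = dfl_fpga_enum_info_alloc(dev);
+ *	dfl_fpga_enum_info_add_dfl(info, start, len);
+ *	dfl_fpga_enum_info_add_irq(info, nr_irqs, irq_table);
+ *	cdev = dfl_fpga_feature_devs_enumerate(info);
+ *	dfl_fpga_enum_info_free(info);
+ *	...
+ *	dfl_fpga_feature_devs_remove(cdev);
+ */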
+
+static int remove_feature_dev(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ enum dfl_id_type type = feature_dev_id_type(pdev);
+ int id = pdev->id;
+
+ platform_device_unregister(pdev);
+
+ dfl_id_free(type, id);
+
+ return 0;
+}
+
+static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
+{
+ device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
+}
+
+/**
+ * dfl_fpga_feature_devs_enumerate - enumerate feature devices
+ * @info: information for enumeration.
+ *
+ * This function creates a container device (base FPGA region), enumerates
+ * feature devices based on the enumeration info and creates platform devices
+ * under the container device.
+ *
+ * Return: dfl_fpga_cdev struct on success, -errno on failure
+ */
+struct dfl_fpga_cdev *
+dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
+{
+ struct build_feature_devs_info *binfo;
+ struct dfl_fpga_enum_dfl *dfl;
+ struct dfl_fpga_cdev *cdev;
+ int ret = 0;
+
+ if (!info->dev)
+ return ERR_PTR(-ENODEV);
+
+ cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return ERR_PTR(-ENOMEM);
+
+ cdev->parent = info->dev;
+ mutex_init(&cdev->lock);
+ INIT_LIST_HEAD(&cdev->port_dev_list);
+
+ cdev->region = fpga_region_register(info->dev, NULL, NULL);
+ if (IS_ERR(cdev->region)) {
+ ret = PTR_ERR(cdev->region);
+ goto free_cdev_exit;
+ }
+
+ /* create and init build info for enumeration */
+ binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
+ if (!binfo) {
+ ret = -ENOMEM;
+ goto unregister_region_exit;
+ }
+
+ binfo->dev = info->dev;
+ binfo->cdev = cdev;
+
+ binfo->nr_irqs = info->nr_irqs;
+ if (info->nr_irqs)
+ binfo->irq_table = info->irq_table;
+
+ /*
+ * start enumeration for all feature devices based on Device Feature
+ * Lists.
+ */
+ list_for_each_entry(dfl, &info->dfls, node) {
+ ret = parse_feature_list(binfo, dfl->start, dfl->len);
+ if (ret) {
+ remove_feature_devs(cdev);
+ build_info_free(binfo);
+ goto unregister_region_exit;
+ }
+ }
+
+ build_info_free(binfo);
+
+ return cdev;
+
+unregister_region_exit:
+ fpga_region_unregister(cdev->region);
+free_cdev_exit:
+ devm_kfree(info->dev, cdev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
+
+/**
+ * dfl_fpga_feature_devs_remove - remove all feature devices
+ * @cdev: fpga container device.
+ *
+ * Remove the container device and all feature devices under the given
+ * container device.
+ */
+void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
+{
+ struct dfl_feature_platform_data *pdata, *ptmp;
+
+ mutex_lock(&cdev->lock);
+ if (cdev->fme_dev)
+ put_device(cdev->fme_dev);
+
+ list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
+ struct platform_device *port_dev = pdata->dev;
+
+ /* remove released ports */
+ if (!device_is_registered(&port_dev->dev)) {
+ dfl_id_free(feature_dev_id_type(port_dev),
+ port_dev->id);
+ platform_device_put(port_dev);
+ }
+
+ list_del(&pdata->node);
+ put_device(&port_dev->dev);
+ }
+ mutex_unlock(&cdev->lock);
+
+ remove_feature_devs(cdev);
+
+ fpga_region_unregister(cdev->region);
+ devm_kfree(cdev->parent, cdev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
+
+/**
+ * __dfl_fpga_cdev_find_port - find a port under given container device
+ *
+ * @cdev: container device
+ * @data: data passed to match function
+ * @match: match function used to find specific port from the port device list
+ *
+ * Find a port device under the container device. This function needs to be
+ * invoked with cdev->lock held.
+ *
+ * Return: pointer to port's platform device if successful, NULL otherwise.
+ *
+ * NOTE: you will need to drop the device reference with put_device() after use.
+ */
+struct platform_device *
+__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *))
+{
+ struct dfl_feature_platform_data *pdata;
+ struct platform_device *port_dev;
+
+ list_for_each_entry(pdata, &cdev->port_dev_list, node) {
+ port_dev = pdata->dev;
+
+ if (match(port_dev, data) && get_device(&port_dev->dev))
+ return port_dev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
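+
+/*
+ * Illustrative sketch (editorial example): look up a port by id with
+ * cdev->lock held, matching the calling convention used by
+ * dfl_fpga_cdev_release_port() below.
+ *
+ *	mutex_lock(&cdev->lock);
+ *	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
+ *					      dfl_fpga_check_port_id);
+ *	mutex_unlock(&cdev->lock);
+ *	if (port_pdev)
+ *		put_device(&port_pdev->dev);
+ */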
+
+static int __init dfl_fpga_init(void)
+{
+ int ret;
+
+ ret = bus_register(&dfl_bus_type);
+ if (ret)
+ return ret;
+
+ dfl_ids_init();
+
+ ret = dfl_chardev_init();
+ if (ret) {
+ dfl_ids_destroy();
+ bus_unregister(&dfl_bus_type);
+ }
+
+ return ret;
+}
+
+/**
+ * dfl_fpga_cdev_release_port - release a port platform device
+ *
+ * @cdev: parent container device.
+ * @port_id: id of the port platform device.
+ *
+ * This function allows the user to release a port platform device. This is
+ * a mandatory step before turning a port from PF into VF for SRIOV support.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
+{
+ struct dfl_feature_platform_data *pdata;
+ struct platform_device *port_pdev;
+ int ret = -ENODEV;
+
+ mutex_lock(&cdev->lock);
+ port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!port_pdev)
+ goto unlock_exit;
+
+ if (!device_is_registered(&port_pdev->dev)) {
+ ret = -EBUSY;
+ goto put_dev_exit;
+ }
+
+ pdata = dev_get_platdata(&port_pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, true);
+ mutex_unlock(&pdata->lock);
+ if (ret)
+ goto put_dev_exit;
+
+ platform_device_del(port_pdev);
+ cdev->released_port_num++;
+put_dev_exit:
+ put_device(&port_pdev->dev);
+unlock_exit:
+ mutex_unlock(&cdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
+
+/**
+ * dfl_fpga_cdev_assign_port - assign a port platform device back
+ *
+ * @cdev: parent container device.
+ * @port_id: id of the port platform device.
+ *
+ * This function allows the user to assign a port platform device back. This
+ * is a mandatory step after disabling SR-IOV support.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
+{
+ struct dfl_feature_platform_data *pdata;
+ struct platform_device *port_pdev;
+ int ret = -ENODEV;
+
+ mutex_lock(&cdev->lock);
+ port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!port_pdev)
+ goto unlock_exit;
+
+ if (device_is_registered(&port_pdev->dev)) {
+ ret = -EBUSY;
+ goto put_dev_exit;
+ }
+
+ ret = platform_device_add(port_pdev);
+ if (ret)
+ goto put_dev_exit;
+
+ pdata = dev_get_platdata(&port_pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ dfl_feature_dev_use_end(pdata);
+ mutex_unlock(&pdata->lock);
+
+ cdev->released_port_num--;
+put_dev_exit:
+ put_device(&port_pdev->dev);
+unlock_exit:
+ mutex_unlock(&cdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
+
+static void config_port_access_mode(struct device *fme_dev, int port_id,
+ bool is_vf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_PORT_OFST(port_id));
+
+ v &= ~FME_PORT_OFST_ACC_CTRL;
+ v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
+ is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);
+
+ writeq(v, base + FME_HDR_PORT_OFST(port_id));
+}
+
+#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
+#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
+
+/**
+ * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
+ *
+ * @cdev: parent container device.
+ *
+ * This function is needed in the SR-IOV configuration routine. It is used to
+ * configure all released ports from VF access mode to PF.
+ */
+void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
+{
+ struct dfl_feature_platform_data *pdata;
+
+ mutex_lock(&cdev->lock);
+ list_for_each_entry(pdata, &cdev->port_dev_list, node) {
+ if (device_is_registered(&pdata->dev->dev))
+ continue;
+
+ config_port_pf_mode(cdev->fme_dev, pdata->id);
+ }
+ mutex_unlock(&cdev->lock);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
+
+/**
+ * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
+ *
+ * @cdev: parent container device.
+ * @num_vfs: VF device number.
+ *
+ * This function is needed in the SR-IOV configuration routine. It is used to
+ * configure the released ports from PF access mode to VF.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
+{
+ struct dfl_feature_platform_data *pdata;
+ int ret = 0;
+
+ mutex_lock(&cdev->lock);
+	/*
+	 * Can't turn multiple ports into one VF device; only one port per VF
+	 * device is allowed. So if the released port number doesn't match the
+	 * VF device number, reject the request with -EINVAL.
+	 */
+ if (cdev->released_port_num != num_vfs) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ list_for_each_entry(pdata, &cdev->port_dev_list, node) {
+ if (device_is_registered(&pdata->dev->dev))
+ continue;
+
+ config_port_vf_mode(cdev->fme_dev, pdata->id);
+ }
+done:
+ mutex_unlock(&cdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
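+
+/*
+ * Example (illustrative sketch only, not part of this driver): a PCI device
+ * driver could wire the helpers above into its sriov_configure() callback
+ * roughly as below, loosely modeled on dfl-pci.c. Names prefixed with
+ * "example_" are hypothetical.
+ *
+ *	static int example_sriov_configure(struct pci_dev *pcidev, int num_vfs)
+ *	{
+ *		struct dfl_fpga_cdev *cdev = pci_get_drvdata(pcidev);
+ *		int ret;
+ *
+ *		if (!num_vfs) {
+ *			// switch released ports back to PF access mode
+ *			pci_disable_sriov(pcidev);
+ *			dfl_fpga_cdev_config_ports_pf(cdev);
+ *			return 0;
+ *		}
+ *
+ *		// ports must already have been released, one per VF
+ *		ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
+ *		if (ret)
+ *			return ret;
+ *
+ *		ret = pci_enable_sriov(pcidev, num_vfs);
+ *		if (ret) {
+ *			dfl_fpga_cdev_config_ports_pf(cdev);
+ *			return ret;
+ *		}
+ *
+ *		return num_vfs;
+ *	}
+ */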
+
+static irqreturn_t dfl_irq_handler(int irq, void *arg)
+{
+ struct eventfd_ctx *trigger = arg;
+
+ eventfd_signal(trigger, 1);
+ return IRQ_HANDLED;
+}
+
+static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
+ int fd)
+{
+ struct platform_device *pdev = feature->dev;
+ struct eventfd_ctx *trigger;
+ int irq, ret;
+
+ irq = feature->irq_ctx[idx].irq;
+
+ if (feature->irq_ctx[idx].trigger) {
+ free_irq(irq, feature->irq_ctx[idx].trigger);
+ kfree(feature->irq_ctx[idx].name);
+ eventfd_ctx_put(feature->irq_ctx[idx].trigger);
+ feature->irq_ctx[idx].trigger = NULL;
+ }
+
+ if (fd < 0)
+ return 0;
+
+ feature->irq_ctx[idx].name =
+ kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%x)", idx,
+ dev_name(&pdev->dev), feature->id);
+ if (!feature->irq_ctx[idx].name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ ret = PTR_ERR(trigger);
+ goto free_name;
+ }
+
+ ret = request_irq(irq, dfl_irq_handler, 0,
+ feature->irq_ctx[idx].name, trigger);
+ if (!ret) {
+ feature->irq_ctx[idx].trigger = trigger;
+ return ret;
+ }
+
+ eventfd_ctx_put(trigger);
+free_name:
+ kfree(feature->irq_ctx[idx].name);
+
+ return ret;
+}
+
+/**
+ * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
+ *
+ * @feature: dfl sub feature.
+ * @start: start of irq index in this dfl sub feature.
+ * @count: number of irqs.
+ * @fds: eventfds to bind with irqs. Unbind the related irq if fds[n] is
+ *	 negative; unbind all @count irqs if @fds is NULL.
+ *
+ * Bind the given eventfds to irqs in this dfl sub feature. Unbind the related
+ * irq if fds[n] is negative; unbind all @count irqs if @fds is NULL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
+ unsigned int count, int32_t *fds)
+{
+ unsigned int i;
+ int ret = 0;
+
+ /* overflow */
+ if (unlikely(start + count < start))
+ return -EINVAL;
+
+ /* exceeds nr_irqs */
+ if (start + count > feature->nr_irqs)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ int fd = fds ? fds[i] : -1;
+
+ ret = do_set_irq_trigger(feature, start + i, fd);
+ if (ret) {
+ while (i--)
+ do_set_irq_trigger(feature, start + i, -1);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);
+
+/**
+ * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
+ * @pdev: the feature device which has the sub feature
+ * @feature: the dfl sub feature
+ * @arg: ioctl argument
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg)
+{
+ return put_user(feature->nr_irqs, (__u32 __user *)arg);
+}
+EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
+
+/**
+ * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
+ * @pdev: the feature device which has the sub feature
+ * @feature: the dfl sub feature
+ * @arg: ioctl argument
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fpga_irq_set hdr;
+ s32 *fds;
+ long ret;
+
+ if (!feature->nr_irqs)
+ return -ENOENT;
+
+ if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
+ return -EFAULT;
+
+ if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
+ (hdr.start + hdr.count < hdr.start))
+ return -EINVAL;
+
+ fds = memdup_user((void __user *)(arg + sizeof(hdr)),
+ array_size(hdr.count, sizeof(s32)));
+ if (IS_ERR(fds))
+ return PTR_ERR(fds);
+
+ mutex_lock(&pdata->lock);
+ ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
+ mutex_unlock(&pdata->lock);
+
+ kfree(fds);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);
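+
+/*
+ * Example (illustrative sketch, not part of this file): a private feature
+ * sub-driver can simply delegate its interrupt ioctls to the two helpers
+ * above, as the error-reporting features do. The command macros come from
+ * include/uapi/linux/fpga-dfl.h; "example_" names are hypothetical.
+ *
+ *	static long example_err_ioctl(struct platform_device *pdev,
+ *				      struct dfl_feature *feature,
+ *				      unsigned int cmd, unsigned long arg)
+ *	{
+ *		switch (cmd) {
+ *		case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
+ *			return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+ *		case DFL_FPGA_PORT_ERR_SET_IRQ:
+ *			return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+ *		default:
+ *			return -ENODEV;
+ *		}
+ *	}
+ */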
+
+static void __exit dfl_fpga_exit(void)
+{
+ dfl_chardev_uinit();
+ dfl_ids_destroy();
+ bus_unregister(&dfl_bus_type);
+}
+
+module_init(dfl_fpga_init);
+module_exit(dfl_fpga_exit);
+
+MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
new file mode 100644
index 0000000000..1d724a28f0
--- /dev/null
+++ b/drivers/fpga/dfl.h
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Driver Header File for FPGA Device Feature List (DFL) Support
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Zhang Yi <yi.z.zhang@intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+
+#ifndef __FPGA_DFL_H
+#define __FPGA_DFL_H
+
+#include <linux/bitfield.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/eventfd.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/fpga/fpga-region.h>
+
+/* maximum supported number of ports */
+#define MAX_DFL_FPGA_PORT_NUM 4
+/* plus one for fme device */
+#define MAX_DFL_FEATURE_DEV_NUM (MAX_DFL_FPGA_PORT_NUM + 1)
+
+/* Reserved 0xfe for Header Group Register and 0xff for AFU */
+#define FEATURE_ID_FIU_HEADER 0xfe
+#define FEATURE_ID_AFU 0xff
+
+#define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+#define FME_FEATURE_ID_THERMAL_MGMT 0x1
+#define FME_FEATURE_ID_POWER_MGMT 0x2
+#define FME_FEATURE_ID_GLOBAL_IPERF 0x3
+#define FME_FEATURE_ID_GLOBAL_ERR 0x4
+#define FME_FEATURE_ID_PR_MGMT 0x5
+#define FME_FEATURE_ID_HSSI 0x6
+#define FME_FEATURE_ID_GLOBAL_DPERF 0x7
+
+#define PORT_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+#define PORT_FEATURE_ID_AFU FEATURE_ID_AFU
+#define PORT_FEATURE_ID_ERROR 0x10
+#define PORT_FEATURE_ID_UMSG 0x11
+#define PORT_FEATURE_ID_UINT 0x12
+#define PORT_FEATURE_ID_STP 0x13
+
+/*
+ * Device Feature Header Register Set
+ *
+ * For FIUs, they all have DFH + GUID + NEXT_AFU as common header registers.
+ * For AFUs, they have DFH + GUID as common header registers.
+ * For private features, they only have the DFH register as common header.
+ */
+#define DFH 0x0
+#define GUID_L 0x8
+#define GUID_H 0x10
+#define NEXT_AFU 0x18
+
+#define DFH_SIZE 0x8
+
+/* Device Feature Header Register Bitfield */
+#define DFH_ID GENMASK_ULL(11, 0) /* Feature ID */
+#define DFH_ID_FIU_FME 0
+#define DFH_ID_FIU_PORT 1
+#define DFH_REVISION GENMASK_ULL(15, 12) /* Feature revision */
+#define DFH_NEXT_HDR_OFST GENMASK_ULL(39, 16) /* Offset to next DFH */
+#define DFH_EOL BIT_ULL(40) /* End of list */
+#define DFH_VERSION GENMASK_ULL(59, 52) /* DFH version */
+#define DFH_TYPE GENMASK_ULL(63, 60) /* Feature type */
+#define DFH_TYPE_AFU 1
+#define DFH_TYPE_PRIVATE 3
+#define DFH_TYPE_FIU 4
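+
+/*
+ * Example (illustrative sketch, not part of this file): the bitfields above
+ * are enough to walk a Device Feature List. A minimal walker, assuming a
+ * mapped DFL at "start", could look like this ("example_" is hypothetical):
+ *
+ *	static void example_walk_dfl(void __iomem *start)
+ *	{
+ *		void __iomem *dfh = start;
+ *		u64 v;
+ *
+ *		for (;;) {
+ *			v = readq(dfh + DFH);
+ *			pr_info("feature id 0x%llx, type %llu\n",
+ *				FIELD_GET(DFH_ID, v), FIELD_GET(DFH_TYPE, v));
+ *			if (FIELD_GET(DFH_EOL, v) ||
+ *			    !FIELD_GET(DFH_NEXT_HDR_OFST, v))
+ *				break;
+ *			dfh += FIELD_GET(DFH_NEXT_HDR_OFST, v);
+ *		}
+ *	}
+ */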
+
+/*
+ * DFHv1 Register Offset definitions
+ * In DFHv1, DFH + GUID + CSR_START + CSR_SIZE_GROUP + PARAM_HDR + PARAM_DATA
+ * are used as common header registers.
+ */
+#define DFHv1_CSR_ADDR 0x18 /* CSR Register start address */
+#define DFHv1_CSR_SIZE_GRP 0x20 /* Size of Reg Block and Group/tag */
+#define DFHv1_PARAM_HDR 0x28 /* Optional First Param header */
+
+/*
+ * CSR Rel Bit, 1'b0 = relative (offset from feature DFH start),
+ * 1'b1 = absolute (ARM or other non-PCIe use)
+ */
+#define DFHv1_CSR_ADDR_REL BIT_ULL(0)
+
+/* CSR Header Register Bit Definitions */
+#define DFHv1_CSR_ADDR_MASK GENMASK_ULL(63, 1) /* 63:1 of CSR address */
+
+/* CSR SIZE Group Register Bit Definitions */
+#define DFHv1_CSR_SIZE_GRP_INSTANCE_ID GENMASK_ULL(15, 0) /* Enumeration instantiated IP */
+#define DFHv1_CSR_SIZE_GRP_GROUPING_ID GENMASK_ULL(30, 16) /* Group Features/interfaces */
+#define DFHv1_CSR_SIZE_GRP_HAS_PARAMS BIT_ULL(31) /* Presence of Parameters */
+#define DFHv1_CSR_SIZE_GRP_SIZE GENMASK_ULL(63, 32) /* Size of CSR Block in bytes */
+
+/* PARAM Header Register Bit Definitions */
+#define DFHv1_PARAM_HDR_ID GENMASK_ULL(15, 0) /* Id of this Param */
+#define DFHv1_PARAM_HDR_VER GENMASK_ULL(31, 16) /* Version Param */
+#define DFHv1_PARAM_HDR_NEXT_OFFSET GENMASK_ULL(63, 35) /* Offset of next Param */
+#define DFHv1_PARAM_HDR_NEXT_EOP BIT_ULL(32)
+#define DFHv1_PARAM_DATA 0x08 /* Offset of Param data from Param header */
+
+#define DFHv1_PARAM_ID_MSI_X 0x1
+#define DFHv1_PARAM_MSI_X_NUMV GENMASK_ULL(63, 32)
+#define DFHv1_PARAM_MSI_X_STARTV GENMASK_ULL(31, 0)
+
+/* Next AFU Register Bitfield */
+#define NEXT_AFU_NEXT_DFH_OFST GENMASK_ULL(23, 0) /* Offset to next AFU */
+
+/* FME Header Register Set */
+#define FME_HDR_DFH DFH
+#define FME_HDR_GUID_L GUID_L
+#define FME_HDR_GUID_H GUID_H
+#define FME_HDR_NEXT_AFU NEXT_AFU
+#define FME_HDR_CAP 0x30
+#define FME_HDR_PORT_OFST(n) (0x38 + ((n) * 0x8))
+#define FME_PORT_OFST_BAR_SKIP 7
+#define FME_HDR_BITSTREAM_ID 0x60
+#define FME_HDR_BITSTREAM_MD 0x68
+
+/* FME Fab Capability Register Bitfield */
+#define FME_CAP_FABRIC_VERID GENMASK_ULL(7, 0) /* Fabric version ID */
+#define FME_CAP_SOCKET_ID BIT_ULL(8) /* Socket ID */
+#define FME_CAP_PCIE0_LINK_AVL BIT_ULL(12) /* PCIE0 Link */
+#define FME_CAP_PCIE1_LINK_AVL BIT_ULL(13) /* PCIE1 Link */
+#define FME_CAP_COHR_LINK_AVL BIT_ULL(14) /* Coherent Link */
+#define FME_CAP_IOMMU_AVL BIT_ULL(16) /* IOMMU available */
+#define FME_CAP_NUM_PORTS GENMASK_ULL(19, 17) /* Number of ports */
+#define FME_CAP_ADDR_WIDTH GENMASK_ULL(29, 24) /* Address bus width */
+#define FME_CAP_CACHE_SIZE GENMASK_ULL(43, 32) /* cache size in KB */
+#define FME_CAP_CACHE_ASSOC GENMASK_ULL(47, 44) /* Associativity */
+
+/* FME Port Offset Register Bitfield */
+/* Offset to port device feature header */
+#define FME_PORT_OFST_DFH_OFST GENMASK_ULL(23, 0)
+/* PCI Bar ID for this port */
+#define FME_PORT_OFST_BAR_ID GENMASK_ULL(34, 32)
+/* AFU MMIO access permission. 1 - VF, 0 - PF. */
+#define FME_PORT_OFST_ACC_CTRL BIT_ULL(55)
+#define FME_PORT_OFST_ACC_PF 0
+#define FME_PORT_OFST_ACC_VF 1
+#define FME_PORT_OFST_IMP BIT_ULL(60)
+
+/* FME Error Capability Register */
+#define FME_ERROR_CAP 0x70
+
+/* FME Error Capability Register Bitfield */
+#define FME_ERROR_CAP_SUPP_INT BIT_ULL(0) /* Interrupt Support */
+#define FME_ERROR_CAP_INT_VECT GENMASK_ULL(12, 1) /* Interrupt vector */
+
+/* PORT Header Register Set */
+#define PORT_HDR_DFH DFH
+#define PORT_HDR_GUID_L GUID_L
+#define PORT_HDR_GUID_H GUID_H
+#define PORT_HDR_NEXT_AFU NEXT_AFU
+#define PORT_HDR_CAP 0x30
+#define PORT_HDR_CTRL 0x38
+#define PORT_HDR_STS 0x40
+#define PORT_HDR_USRCLK_CMD0 0x50
+#define PORT_HDR_USRCLK_CMD1 0x58
+#define PORT_HDR_USRCLK_STS0 0x60
+#define PORT_HDR_USRCLK_STS1 0x68
+
+/* Port Capability Register Bitfield */
+#define PORT_CAP_PORT_NUM GENMASK_ULL(1, 0) /* ID of this port */
+#define PORT_CAP_MMIO_SIZE GENMASK_ULL(23, 8) /* MMIO size in KB */
+#define PORT_CAP_SUPP_INT_NUM GENMASK_ULL(35, 32) /* Interrupts num */
+
+/* Port Control Register Bitfield */
+#define PORT_CTRL_SFTRST BIT_ULL(0) /* Port soft reset */
+/* Latency tolerance reporting. '1' >= 40us, '0' < 40us. */
+#define PORT_CTRL_LATENCY BIT_ULL(2)
+#define PORT_CTRL_SFTRST_ACK BIT_ULL(4) /* HW ack for reset */
+
+/* Port Status Register Bitfield */
+#define PORT_STS_AP2_EVT BIT_ULL(13) /* AP2 event detected */
+#define PORT_STS_AP1_EVT BIT_ULL(12) /* AP1 event detected */
+#define PORT_STS_PWR_STATE GENMASK_ULL(11, 8) /* AFU power states */
+#define PORT_STS_PWR_STATE_NORM 0
+#define PORT_STS_PWR_STATE_AP1 1 /* 50% throttling */
+#define PORT_STS_PWR_STATE_AP2 2 /* 90% throttling */
+#define PORT_STS_PWR_STATE_AP6 6 /* 100% throttling */
+
+/* Port Error Capability Register */
+#define PORT_ERROR_CAP 0x38
+
+/* Port Error Capability Register Bitfield */
+#define PORT_ERROR_CAP_SUPP_INT BIT_ULL(0) /* Interrupt Support */
+#define PORT_ERROR_CAP_INT_VECT GENMASK_ULL(12, 1) /* Interrupt vector */
+
+/* Port Uint Capability Register */
+#define PORT_UINT_CAP 0x8
+
+/* Port Uint Capability Register Bitfield */
+#define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */
+#define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */
+
+/**
+ * struct dfl_fpga_port_ops - port ops
+ *
+ * @name: name of this port ops, to match with port platform device.
+ * @owner: pointer to the module which owns this port ops.
+ * @node: node to link port ops to global list.
+ * @get_id: get port id from hardware.
+ * @enable_set: enable/disable the port.
+ */
+struct dfl_fpga_port_ops {
+ const char *name;
+ struct module *owner;
+ struct list_head node;
+ int (*get_id)(struct platform_device *pdev);
+ int (*enable_set)(struct platform_device *pdev, bool enable);
+};
+
+void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
+void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
+void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
+int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
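+
+/*
+ * Example (illustrative sketch, not part of this file): a port driver
+ * registers its ops so the framework can query the port id and enable or
+ * disable the port. Loosely modeled on the dfl-afu port driver; names
+ * prefixed with "example_" are hypothetical.
+ *
+ *	static int example_port_get_id(struct platform_device *pdev)
+ *	{
+ *		void __iomem *base;
+ *
+ *		base = dfl_get_feature_ioaddr_by_id(&pdev->dev,
+ *						    PORT_FEATURE_ID_HEADER);
+ *		return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
+ *	}
+ *
+ *	static struct dfl_fpga_port_ops example_port_ops = {
+ *		.name = DFL_FPGA_FEATURE_DEV_PORT,
+ *		.owner = THIS_MODULE,
+ *		.get_id = example_port_get_id,
+ *	};
+ *
+ *	// in module init:	dfl_fpga_port_ops_add(&example_port_ops);
+ *	// in module exit:	dfl_fpga_port_ops_del(&example_port_ops);
+ */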
+
+/**
+ * struct dfl_feature_id - dfl private feature id
+ *
+ * @id: unique dfl private feature id.
+ */
+struct dfl_feature_id {
+ u16 id;
+};
+
+/**
+ * struct dfl_feature_driver - dfl private feature driver
+ *
+ * @id_table: id_table for dfl private features supported by this driver.
+ * @ops: ops of this dfl private feature driver.
+ */
+struct dfl_feature_driver {
+ const struct dfl_feature_id *id_table;
+ const struct dfl_feature_ops *ops;
+};
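+
+/*
+ * Example (illustrative sketch, not part of this file): a feature device
+ * driver describes the private features it handles with a NULL-ops
+ * terminated array of dfl_feature_driver entries and hands it to
+ * dfl_fpga_dev_feature_init(). "example_" names (including the ops struct)
+ * are hypothetical.
+ *
+ *	static const struct dfl_feature_id example_err_id_table[] = {
+ *		{.id = PORT_FEATURE_ID_ERROR,},
+ *		{0,}
+ *	};
+ *
+ *	static struct dfl_feature_driver example_feature_drvs[] = {
+ *		{
+ *			.id_table = example_err_id_table,
+ *			.ops = &example_err_ops,
+ *		},
+ *		{
+ *			.ops = NULL,	// sentinel
+ *		},
+ *	};
+ *
+ *	// in probe:	dfl_fpga_dev_feature_init(pdev, example_feature_drvs);
+ */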
+
+/**
+ * struct dfl_feature_irq_ctx - dfl private feature interrupt context
+ *
+ * @irq: Linux IRQ number of this interrupt.
+ * @trigger: eventfd context to signal when interrupt happens.
+ * @name: irq name needed when requesting irq.
+ */
+struct dfl_feature_irq_ctx {
+ int irq;
+ struct eventfd_ctx *trigger;
+ char *name;
+};
+
+/**
+ * struct dfl_feature - sub feature of the feature devices
+ *
+ * @dev: ptr to pdev of the feature device which has the sub feature.
+ * @id: sub feature id.
+ * @revision: revision of this sub feature.
+ * @resource_index: each sub feature has one mmio resource for its registers.
+ *		     This index is used to find its mmio resource from the
+ *		     feature dev (platform device)'s resources.
+ * @ioaddr: mapped mmio resource address.
+ * @irq_ctx: interrupt context list.
+ * @nr_irqs: number of interrupt contexts.
+ * @ops: ops of this sub feature.
+ * @ddev: ptr to the dfl device of this sub feature.
+ * @priv: priv data of this feature.
+ * @dfh_version: version of the DFH
+ * @param_size: size of dfh parameters
+ * @params: pointer to a memory copy of the dfh parameters
+ */
+struct dfl_feature {
+ struct platform_device *dev;
+ u16 id;
+ u8 revision;
+ int resource_index;
+ void __iomem *ioaddr;
+ struct dfl_feature_irq_ctx *irq_ctx;
+ unsigned int nr_irqs;
+ const struct dfl_feature_ops *ops;
+ struct dfl_device *ddev;
+ void *priv;
+ u8 dfh_version;
+ unsigned int param_size;
+ void *params;
+};
+
+#define FEATURE_DEV_ID_UNUSED (-1)
+
+/**
+ * struct dfl_feature_platform_data - platform data for feature devices
+ *
+ * @node: node to link feature devs to container device's port_dev_list.
+ * @lock: mutex to protect platform data.
+ * @cdev: cdev of feature dev.
+ * @dev: ptr to platform device linked with this platform data.
+ * @dfl_cdev: ptr to container device.
+ * @id: id used for this feature device.
+ * @disable_count: count for port disable.
+ * @excl_open: set on feature device exclusive open.
+ * @open_count: count for feature device open.
+ * @num: number for sub features.
+ * @private: ptr to feature dev private data.
+ * @features: sub features of this feature dev.
+ */
+struct dfl_feature_platform_data {
+ struct list_head node;
+ struct mutex lock;
+ struct cdev cdev;
+ struct platform_device *dev;
+ struct dfl_fpga_cdev *dfl_cdev;
+ int id;
+ unsigned int disable_count;
+ bool excl_open;
+ int open_count;
+ void *private;
+ int num;
+ struct dfl_feature features[];
+};
+
+static inline
+int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+ bool excl)
+{
+ if (pdata->excl_open)
+ return -EBUSY;
+
+ if (excl) {
+ if (pdata->open_count)
+ return -EBUSY;
+
+ pdata->excl_open = true;
+ }
+ pdata->open_count++;
+
+ return 0;
+}
+
+static inline
+void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
+{
+ pdata->excl_open = false;
+
+ if (WARN_ON(pdata->open_count <= 0))
+ return;
+
+ pdata->open_count--;
+}
+
+static inline
+int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->open_count;
+}
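+
+/*
+ * Example (illustrative sketch, not part of this file): the open()/release()
+ * handlers of a feature device typically bracket access with the
+ * use_begin/use_end helpers above, under pdata->lock. Loosely modeled on the
+ * dfl-afu driver; "example_" names are hypothetical.
+ *
+ *	static int example_open(struct inode *inode, struct file *filp)
+ *	{
+ *		struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
+ *		struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ *		int ret;
+ *
+ *		mutex_lock(&pdata->lock);
+ *		ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ *		mutex_unlock(&pdata->lock);
+ *		if (ret)
+ *			return ret;
+ *
+ *		filp->private_data = fdev;
+ *		return 0;
+ *	}
+ *
+ *	static int example_release(struct inode *inode, struct file *filp)
+ *	{
+ *		struct platform_device *fdev = filp->private_data;
+ *		struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ *
+ *		mutex_lock(&pdata->lock);
+ *		dfl_feature_dev_use_end(pdata);
+ *		mutex_unlock(&pdata->lock);
+ *		return 0;
+ *	}
+ */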
+
+static inline
+void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
+ void *private)
+{
+ pdata->private = private;
+}
+
+static inline
+void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->private;
+}
+
+struct dfl_feature_ops {
+ int (*init)(struct platform_device *pdev, struct dfl_feature *feature);
+ void (*uinit)(struct platform_device *pdev,
+ struct dfl_feature *feature);
+ long (*ioctl)(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg);
+};
+
+#define DFL_FPGA_FEATURE_DEV_FME "dfl-fme"
+#define DFL_FPGA_FEATURE_DEV_PORT "dfl-port"
+
+void dfl_fpga_dev_feature_uinit(struct platform_device *pdev);
+int dfl_fpga_dev_feature_init(struct platform_device *pdev,
+ struct dfl_feature_driver *feature_drvs);
+
+int dfl_fpga_dev_ops_register(struct platform_device *pdev,
+ const struct file_operations *fops,
+ struct module *owner);
+void dfl_fpga_dev_ops_unregister(struct platform_device *pdev);
+
+static inline
+struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
+{
+ struct dfl_feature_platform_data *pdata;
+
+ pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
+ cdev);
+ return pdata->dev;
+}
+
+#define dfl_fpga_dev_for_each_feature(pdata, feature) \
+ for ((feature) = (pdata)->features; \
+ (feature) < (pdata)->features + (pdata)->num; (feature)++)
+
+static inline
+struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u16 id)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature *feature;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (feature->id == id)
+ return feature;
+
+ return NULL;
+}
+
+static inline
+void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
+{
+ struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
+
+ if (feature && feature->ioaddr)
+ return feature->ioaddr;
+
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline bool is_dfl_feature_present(struct device *dev, u16 id)
+{
+ return !!dfl_get_feature_ioaddr_by_id(dev, id);
+}
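+
+/*
+ * Example (illustrative sketch, not part of this file): reading a register
+ * of a specific feature via the helper above, here the port id from the
+ * port header capability register:
+ *
+ *	void __iomem *base;
+ *	u64 v;
+ *
+ *	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ *	v = readq(base + PORT_HDR_CAP);
+ *	port_id = FIELD_GET(PORT_CAP_PORT_NUM, v);
+ */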
+
+static inline
+struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->dev->dev.parent->parent;
+}
+
+static inline bool dfl_feature_is_fme(void __iomem *base)
+{
+ u64 v = readq(base + DFH);
+
+ return (FIELD_GET(DFH_TYPE, v) == DFH_TYPE_FIU) &&
+ (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_FME);
+}
+
+static inline bool dfl_feature_is_port(void __iomem *base)
+{
+ u64 v = readq(base + DFH);
+
+ return (FIELD_GET(DFH_TYPE, v) == DFH_TYPE_FIU) &&
+ (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT);
+}
+
+static inline u8 dfl_feature_revision(void __iomem *base)
+{
+ return (u8)FIELD_GET(DFH_REVISION, readq(base + DFH));
+}
+
+/**
+ * struct dfl_fpga_enum_info - DFL FPGA enumeration information
+ *
+ * @dev: parent device.
+ * @dfls: list of device feature lists.
+ * @nr_irqs: number of irqs for all feature devices.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by hw irq numbers.
+ */
+struct dfl_fpga_enum_info {
+ struct device *dev;
+ struct list_head dfls;
+ unsigned int nr_irqs;
+ int *irq_table;
+};
+
+/**
+ * struct dfl_fpga_enum_dfl - DFL FPGA enumeration device feature list info
+ *
+ * @start: base address of this device feature list.
+ * @len: size of this device feature list.
+ * @node: node in list of device feature lists.
+ */
+struct dfl_fpga_enum_dfl {
+ resource_size_t start;
+ resource_size_t len;
+ struct list_head node;
+};
+
+struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev);
+int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
+ resource_size_t start, resource_size_t len);
+int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
+ unsigned int nr_irqs, int *irq_table);
+void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
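+
+/*
+ * Example (illustrative sketch, not part of this file): a bus-specific
+ * driver (PCI, platform, ...) feeds physical DFL locations into an
+ * enumeration info and then enumerates the feature devices. Loosely
+ * modeled on dfl-pci.c; "start" and "len" are assumed to be known.
+ *
+ *	struct dfl_fpga_enum_info *info;
+ *	struct dfl_fpga_cdev *cdev;
+ *	int ret;
+ *
+ *	info = dfl_fpga_enum_info_alloc(dev);
+ *	if (!info)
+ *		return -ENOMEM;
+ *
+ *	ret = dfl_fpga_enum_info_add_dfl(info, start, len);
+ *	if (ret)
+ *		goto free_info;
+ *
+ *	cdev = dfl_fpga_feature_devs_enumerate(info);
+ *	if (IS_ERR(cdev))
+ *		ret = PTR_ERR(cdev);
+ *	// on teardown: dfl_fpga_feature_devs_remove(cdev);
+ * free_info:
+ *	dfl_fpga_enum_info_free(info);
+ */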
+
+/**
+ * struct dfl_fpga_cdev - container device of DFL based FPGA
+ *
+ * @parent: parent device of this container device.
+ * @region: base fpga region.
+ * @fme_dev: FME feature device under this container device.
+ * @lock: mutex lock to protect the port device list.
+ * @port_dev_list: list of all port feature devices under this container device.
+ * @released_port_num: released port number under this container device.
+ */
+struct dfl_fpga_cdev {
+ struct device *parent;
+ struct fpga_region *region;
+ struct device *fme_dev;
+ struct mutex lock;
+ struct list_head port_dev_list;
+ int released_port_num;
+};
+
+struct dfl_fpga_cdev *
+dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
+void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);
+
+/*
+ * The device reference must be dropped with put_device() after using the port
+ * platform device returned by the __dfl_fpga_cdev_find_port() and
+ * dfl_fpga_cdev_find_port() functions.
+ */
+struct platform_device *
+__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *));
+
+static inline struct platform_device *
+dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *))
+{
+ struct platform_device *pdev;
+
+ mutex_lock(&cdev->lock);
+ pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
+ mutex_unlock(&cdev->lock);
+
+ return pdev;
+}
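+
+/*
+ * Example (illustrative sketch, not part of this file): looking up a port
+ * by id with the standard match function and dropping the reference after
+ * use, as required by the comment above:
+ *
+ *	struct platform_device *port_pdev;
+ *	int port_id = 0;
+ *
+ *	port_pdev = dfl_fpga_cdev_find_port(cdev, &port_id,
+ *					    dfl_fpga_check_port_id);
+ *	if (port_pdev) {
+ *		// ... use the port platform device ...
+ *		put_device(&port_pdev->dev);
+ *	}
+ */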
+
+int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
+int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id);
+void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev);
+int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf);
+int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
+ unsigned int count, int32_t *fds);
+long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg);
+long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg);
+
+#endif /* __FPGA_DFL_H */
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
new file mode 100644
index 0000000000..a024be2b84
--- /dev/null
+++ b/drivers/fpga/fpga-bridge.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Bridge Framework Driver
+ *
+ * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ * Copyright (C) 2017 Intel Corporation
+ */
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+static DEFINE_IDA(fpga_bridge_ida);
+static const struct class fpga_bridge_class;
+
+/* Lock for adding/removing bridges to/from linked lists */
+static DEFINE_SPINLOCK(bridge_list_lock);
+
+/**
+ * fpga_bridge_enable - Enable transactions on the bridge
+ *
+ * @bridge: FPGA bridge
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_enable(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "enable\n");
+
+ if (bridge->br_ops && bridge->br_ops->enable_set)
+ return bridge->br_ops->enable_set(bridge, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_enable);
+
+/**
+ * fpga_bridge_disable - Disable transactions on the bridge
+ *
+ * @bridge: FPGA bridge
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_disable(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "disable\n");
+
+ if (bridge->br_ops && bridge->br_ops->enable_set)
+ return bridge->br_ops->enable_set(bridge, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_disable);
+
+static struct fpga_bridge *__fpga_bridge_get(struct device *dev,
+ struct fpga_image_info *info)
+{
+ struct fpga_bridge *bridge;
+ int ret = -ENODEV;
+
+ bridge = to_fpga_bridge(dev);
+
+ bridge->info = info;
+
+ if (!mutex_trylock(&bridge->mutex)) {
+ ret = -EBUSY;
+ goto err_dev;
+ }
+
+ if (!try_module_get(dev->parent->driver->owner))
+ goto err_ll_mod;
+
+ dev_dbg(&bridge->dev, "get\n");
+
+ return bridge;
+
+err_ll_mod:
+ mutex_unlock(&bridge->mutex);
+err_dev:
+ put_device(dev);
+ return ERR_PTR(ret);
+}
+
+/**
+ * of_fpga_bridge_get - get an exclusive reference to an fpga bridge
+ *
+ * @np: node pointer of an FPGA bridge.
+ * @info: fpga image specific information.
+ *
+ * Return:
+ * * fpga_bridge struct pointer if successful.
+ * * -EBUSY if someone already has a reference to the bridge.
+ * * -ENODEV if @np is not an FPGA Bridge or can't take parent driver refcount.
+ */
+struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
+ struct fpga_image_info *info)
+{
+ struct device *dev;
+
+ dev = class_find_device_by_of_node(&fpga_bridge_class, np);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_bridge_get(dev, info);
+}
+EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
+
+static int fpga_bridge_dev_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+/**
+ * fpga_bridge_get - get an exclusive reference to an fpga bridge
+ * @dev: parent device that fpga bridge was registered with
+ * @info: fpga image specific information
+ *
+ * Given a device, get an exclusive reference to an fpga bridge.
+ *
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
+ */
+struct fpga_bridge *fpga_bridge_get(struct device *dev,
+ struct fpga_image_info *info)
+{
+ struct device *bridge_dev;
+
+ bridge_dev = class_find_device(&fpga_bridge_class, NULL, dev,
+ fpga_bridge_dev_match);
+ if (!bridge_dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_bridge_get(bridge_dev, info);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_get);
+
+/**
+ * fpga_bridge_put - release a reference to a bridge
+ *
+ * @bridge: FPGA bridge
+ */
+void fpga_bridge_put(struct fpga_bridge *bridge)
+{
+ dev_dbg(&bridge->dev, "put\n");
+
+ bridge->info = NULL;
+ module_put(bridge->dev.parent->driver->owner);
+ mutex_unlock(&bridge->mutex);
+ put_device(&bridge->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_put);
+
+/**
+ * fpga_bridges_enable - enable bridges in a list
+ * @bridge_list: list of FPGA bridges
+ *
+ * Enable each bridge in the list. If list is empty, do nothing.
+ *
+ * Return: 0 for success or empty bridge list or an error code otherwise.
+ */
+int fpga_bridges_enable(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ int ret;
+
+ list_for_each_entry(bridge, bridge_list, node) {
+ ret = fpga_bridge_enable(bridge);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_enable);
+
+/**
+ * fpga_bridges_disable - disable bridges in a list
+ *
+ * @bridge_list: list of FPGA bridges
+ *
+ * Disable each bridge in the list. If list is empty, do nothing.
+ *
+ * Return: 0 for success or empty bridge list or an error code otherwise.
+ */
+int fpga_bridges_disable(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ int ret;
+
+ list_for_each_entry(bridge, bridge_list, node) {
+ ret = fpga_bridge_disable(bridge);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_disable);
+
+/**
+ * fpga_bridges_put - put bridges
+ *
+ * @bridge_list: list of FPGA bridges
+ *
+ * For each bridge in the list, put the bridge and remove it from the list.
+ * If list is empty, do nothing.
+ */
+void fpga_bridges_put(struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge, *next;
+ unsigned long flags;
+
+ list_for_each_entry_safe(bridge, next, bridge_list, node) {
+ fpga_bridge_put(bridge);
+
+ spin_lock_irqsave(&bridge_list_lock, flags);
+ list_del(&bridge->node);
+ spin_unlock_irqrestore(&bridge_list_lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_put);
+
+/**
+ * of_fpga_bridge_get_to_list - get a bridge, add it to a list
+ *
+ * @np: node pointer of an FPGA bridge
+ * @info: fpga image specific information
+ * @bridge_list: list of FPGA bridges
+ *
+ * Get an exclusive reference to the bridge and add it to the list.
+ *
+ * Return: 0 for success, error code from of_fpga_bridge_get() otherwise.
+ */
+int of_fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ unsigned long flags;
+
+ bridge = of_fpga_bridge_get(np, info);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ spin_lock_irqsave(&bridge_list_lock, flags);
+ list_add(&bridge->node, bridge_list);
+ spin_unlock_irqrestore(&bridge_list_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_fpga_bridge_get_to_list);
+
+/**
+ * fpga_bridge_get_to_list - given device, get a bridge, add it to a list
+ *
+ * @dev: FPGA bridge device
+ * @info: fpga image specific information
+ * @bridge_list: list of FPGA bridges
+ *
+ * Get an exclusive reference to the bridge and add it to the list.
+ *
+ * Return: 0 for success, error code from fpga_bridge_get() otherwise.
+ */
+int fpga_bridge_get_to_list(struct device *dev,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ unsigned long flags;
+
+ bridge = fpga_bridge_get(dev, info);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ spin_lock_irqsave(&bridge_list_lock, flags);
+ list_add(&bridge->node, bridge_list);
+ spin_unlock_irqrestore(&bridge_list_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_get_to_list);
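+
+/*
+ * Example (illustrative sketch, not part of this file): the typical
+ * reprogramming sequence collects bridges into a list, disables them,
+ * reprograms the FPGA, then re-enables and puts them. This mirrors how
+ * the FPGA region code drives bridges; "bridge_dev" and "info" are
+ * assumed to be at hand.
+ *
+ *	LIST_HEAD(bridge_list);
+ *	int ret;
+ *
+ *	ret = fpga_bridge_get_to_list(bridge_dev, info, &bridge_list);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = fpga_bridges_disable(&bridge_list);
+ *	if (ret)
+ *		goto out_put;
+ *
+ *	// ... write the new image via an fpga_manager here ...
+ *
+ *	ret = fpga_bridges_enable(&bridge_list);
+ * out_put:
+ *	fpga_bridges_put(&bridge_list);
+ */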
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+ return sprintf(buf, "%s\n", bridge->name);
+}
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+ int state = 1;
+
+ if (bridge->br_ops && bridge->br_ops->enable_show) {
+ state = bridge->br_ops->enable_show(bridge);
+ if (state < 0)
+ return state;
+ }
+
+ return sysfs_emit(buf, "%s\n", state ? "enabled" : "disabled");
+}
+
+static DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *fpga_bridge_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_state.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fpga_bridge);
+
+/**
+ * fpga_bridge_register - create and register an FPGA Bridge device
+ * @parent: FPGA bridge device from pdev
+ * @name: FPGA bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: FPGA bridge private data
+ *
+ * Return: struct fpga_bridge pointer or ERR_PTR()
+ */
+struct fpga_bridge *
+fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops,
+ void *priv)
+{
+ struct fpga_bridge *bridge;
+ int id, ret;
+
+ if (!br_ops) {
+ dev_err(parent, "Attempt to register without fpga_bridge_ops\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!name || !strlen(name)) {
+ dev_err(parent, "Attempt to register with no name!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_alloc(&fpga_bridge_ida, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto error_kfree;
+ }
+
+ mutex_init(&bridge->mutex);
+ INIT_LIST_HEAD(&bridge->node);
+
+ bridge->name = name;
+ bridge->br_ops = br_ops;
+ bridge->priv = priv;
+
+ bridge->dev.groups = br_ops->groups;
+ bridge->dev.class = &fpga_bridge_class;
+ bridge->dev.parent = parent;
+ bridge->dev.of_node = parent->of_node;
+ bridge->dev.id = id;
+
+ ret = dev_set_name(&bridge->dev, "br%d", id);
+ if (ret)
+ goto error_device;
+
+ ret = device_register(&bridge->dev);
+ if (ret) {
+ put_device(&bridge->dev);
+ return ERR_PTR(ret);
+ }
+
+ of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
+
+ return bridge;
+
+error_device:
+ ida_free(&fpga_bridge_ida, id);
+error_kfree:
+ kfree(bridge);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_register);
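+
+/*
+ * Example (illustrative sketch, not part of this file): a minimal bridge
+ * driver implements enable_set() and registers itself in probe. The
+ * "example_" names and the single gate register layout are hypothetical.
+ *
+ *	static int example_enable_set(struct fpga_bridge *bridge, bool enable)
+ *	{
+ *		void __iomem *gate = bridge->priv;
+ *
+ *		writel(enable ? 1 : 0, gate);	// hypothetical gate register
+ *		return 0;
+ *	}
+ *
+ *	static const struct fpga_bridge_ops example_br_ops = {
+ *		.enable_set = example_enable_set,
+ *	};
+ *
+ *	// in probe, with "gate" ioremapped beforehand:
+ *	br = fpga_bridge_register(&pdev->dev, "example bridge",
+ *				  &example_br_ops, gate);
+ *	if (IS_ERR(br))
+ *		return PTR_ERR(br);
+ *	platform_set_drvdata(pdev, br);
+ */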
+
+/**
+ * fpga_bridge_unregister - unregister an FPGA bridge
+ *
+ * @bridge: FPGA bridge struct
+ *
+ * This function is intended for use in an FPGA bridge driver's remove function.
+ */
+void fpga_bridge_unregister(struct fpga_bridge *bridge)
+{
+ /*
+ * If the low level driver provides a method for putting bridge into
+ * a desired state upon unregister, do it.
+ */
+ if (bridge->br_ops && bridge->br_ops->fpga_bridge_remove)
+ bridge->br_ops->fpga_bridge_remove(bridge);
+
+ device_unregister(&bridge->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_unregister);
+
+static void fpga_bridge_dev_release(struct device *dev)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+ ida_free(&fpga_bridge_ida, bridge->dev.id);
+ kfree(bridge);
+}
+
+static const struct class fpga_bridge_class = {
+ .name = "fpga_bridge",
+ .dev_groups = fpga_bridge_groups,
+ .dev_release = fpga_bridge_dev_release,
+};
+
+static int __init fpga_bridge_dev_init(void)
+{
+ return class_register(&fpga_bridge_class);
+}
+
+static void __exit fpga_bridge_dev_exit(void)
+{
+ class_unregister(&fpga_bridge_class);
+ ida_destroy(&fpga_bridge_ida);
+}
+
+MODULE_DESCRIPTION("FPGA Bridge Driver");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
+MODULE_LICENSE("GPL v2");
+
+subsys_initcall(fpga_bridge_dev_init);
+module_exit(fpga_bridge_dev_exit);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
new file mode 100644
index 0000000000..06651389c5
--- /dev/null
+++ b/drivers/fpga/fpga-mgr.c
@@ -0,0 +1,994 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Manager Core
+ *
+ * Copyright (C) 2013-2015 Altera Corporation
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * With code from the mailing list:
+ * Copyright (C) 2013 Xilinx, Inc.
+ */
+#include <linux/firmware.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+
+static DEFINE_IDA(fpga_mgr_ida);
+static const struct class fpga_mgr_class;
+
+struct fpga_mgr_devres {
+ struct fpga_manager *mgr;
+};
+
+static inline void fpga_mgr_fpga_remove(struct fpga_manager *mgr)
+{
+ if (mgr->mops->fpga_remove)
+ mgr->mops->fpga_remove(mgr);
+}
+
+static inline enum fpga_mgr_states fpga_mgr_state(struct fpga_manager *mgr)
+{
+ if (mgr->mops->state)
+ return mgr->mops->state(mgr);
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static inline u64 fpga_mgr_status(struct fpga_manager *mgr)
+{
+ if (mgr->mops->status)
+ return mgr->mops->status(mgr);
+ return 0;
+}
+
+static inline int fpga_mgr_write(struct fpga_manager *mgr, const char *buf, size_t count)
+{
+ if (mgr->mops->write)
+ return mgr->mops->write(mgr, buf, count);
+ return -EOPNOTSUPP;
+}
+
+/*
+ * After the entire FPGA image has been written, do the device specific steps to
+ * finish and set the FPGA into operating mode.
+ */
+static inline int fpga_mgr_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ int ret = 0;
+
+ mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
+ if (mgr->mops->write_complete)
+ ret = mgr->mops->write_complete(mgr, info);
+ if (ret) {
+ dev_err(&mgr->dev, "Error after writing image data to FPGA\n");
+ mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
+ return ret;
+ }
+ mgr->state = FPGA_MGR_STATE_OPERATING;
+
+ return 0;
+}
+
+static inline int fpga_mgr_parse_header(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ if (mgr->mops->parse_header)
+ return mgr->mops->parse_header(mgr, info, buf, count);
+ return 0;
+}
+
+static inline int fpga_mgr_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ if (mgr->mops->write_init)
+ return mgr->mops->write_init(mgr, info, buf, count);
+ return 0;
+}
+
+static inline int fpga_mgr_write_sg(struct fpga_manager *mgr,
+ struct sg_table *sgt)
+{
+ if (mgr->mops->write_sg)
+ return mgr->mops->write_sg(mgr, sgt);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * fpga_image_info_alloc - Allocate an FPGA image info struct
+ * @dev: owning device
+ *
+ * Return: struct fpga_image_info or NULL
+ */
+struct fpga_image_info *fpga_image_info_alloc(struct device *dev)
+{
+ struct fpga_image_info *info;
+
+ get_device(dev);
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ put_device(dev);
+ return NULL;
+ }
+
+ info->dev = dev;
+
+ return info;
+}
+EXPORT_SYMBOL_GPL(fpga_image_info_alloc);
+
+/**
+ * fpga_image_info_free - Free an FPGA image info struct
+ * @info: FPGA image info struct to free
+ */
+void fpga_image_info_free(struct fpga_image_info *info)
+{
+ struct device *dev;
+
+ if (!info)
+ return;
+
+ dev = info->dev;
+ if (info->firmware_name)
+ devm_kfree(dev, info->firmware_name);
+
+ devm_kfree(dev, info);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(fpga_image_info_free);
+
+/*
+ * Call the low level driver's parse_header function with entire FPGA image
+ * buffer on the input. This will set info->header_size and info->data_size.
+ */
+static int fpga_mgr_parse_header_mapped(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ mgr->state = FPGA_MGR_STATE_PARSE_HEADER;
+ ret = fpga_mgr_parse_header(mgr, info, buf, count);
+
+ if (info->header_size + info->data_size > count) {
+ dev_err(&mgr->dev, "Bitstream data outruns FPGA image\n");
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(&mgr->dev, "Error while parsing FPGA image header\n");
+ mgr->state = FPGA_MGR_STATE_PARSE_HEADER_ERR;
+ }
+
+ return ret;
+}
+
+/*
+ * Call the low level driver's parse_header function with the first fragment of
+ * the scattered FPGA image on the input. If the header fits in the first
+ * fragment, parse_header will set info->header_size and info->data_size. If it
+ * does not, parse_header will set the desired size in info->header_size and
+ * -EAGAIN will be returned.
+ */
+static int fpga_mgr_parse_header_sg_first(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ struct sg_table *sgt)
+{
+ struct sg_mapping_iter miter;
+ int ret;
+
+ mgr->state = FPGA_MGR_STATE_PARSE_HEADER;
+
+ sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+ if (sg_miter_next(&miter) &&
+ miter.length >= info->header_size)
+ ret = fpga_mgr_parse_header(mgr, info, miter.addr, miter.length);
+ else
+ ret = -EAGAIN;
+ sg_miter_stop(&miter);
+
+ if (ret && ret != -EAGAIN) {
+ dev_err(&mgr->dev, "Error while parsing FPGA image header\n");
+ mgr->state = FPGA_MGR_STATE_PARSE_HEADER_ERR;
+ }
+
+ return ret;
+}
+
+/*
+ * Copy scattered FPGA image fragments to a temporary buffer and call the
+ * low level driver's parse_header function. This should be called after
+ * fpga_mgr_parse_header_sg_first() returned -EAGAIN. On success, a pointer
+ * to the newly allocated image header copy is returned and its size is set
+ * into *ret_size. The returned buffer must be freed by the caller.
+ */
+static void *fpga_mgr_parse_header_sg(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ struct sg_table *sgt, size_t *ret_size)
+{
+ size_t len, new_header_size, header_size = 0;
+ char *new_buf, *buf = NULL;
+ int ret;
+
+ do {
+ new_header_size = info->header_size;
+ if (new_header_size <= header_size) {
+ dev_err(&mgr->dev, "Requested invalid header size\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ new_buf = krealloc(buf, new_header_size, GFP_KERNEL);
+ if (!new_buf) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ buf = new_buf;
+
+ len = sg_pcopy_to_buffer(sgt->sgl, sgt->nents,
+ buf + header_size,
+ new_header_size - header_size,
+ header_size);
+ if (len != new_header_size - header_size) {
+ ret = -EFAULT;
+ break;
+ }
+
+ header_size = new_header_size;
+ ret = fpga_mgr_parse_header(mgr, info, buf, header_size);
+ } while (ret == -EAGAIN);
+
+ if (ret) {
+ dev_err(&mgr->dev, "Error while parsing FPGA image header\n");
+ mgr->state = FPGA_MGR_STATE_PARSE_HEADER_ERR;
+ kfree(buf);
+ buf = ERR_PTR(ret);
+ }
+
+ *ret_size = header_size;
+
+ return buf;
+}
+
+/*
+ * Call the low level driver's write_init function. This will do the
+ * device-specific things to get the FPGA into the state where it is ready to
+ * receive an FPGA image. The low level driver gets to see at least the first
+ * info->header_size bytes in the buffer. If info->header_size is 0,
+ * write_init will not get any bytes of image buffer.
+ */
+static int fpga_mgr_write_init_buf(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ size_t header_size = info->header_size;
+ int ret;
+
+ mgr->state = FPGA_MGR_STATE_WRITE_INIT;
+
+ if (header_size > count)
+ ret = -EINVAL;
+ else if (!header_size)
+ ret = fpga_mgr_write_init(mgr, info, NULL, 0);
+ else
+ ret = fpga_mgr_write_init(mgr, info, buf, count);
+
+ if (ret) {
+ dev_err(&mgr->dev, "Error preparing FPGA for writing\n");
+ mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fpga_mgr_prepare_sg(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ struct sg_table *sgt)
+{
+ struct sg_mapping_iter miter;
+ size_t len;
+ char *buf;
+ int ret;
+
+	/* Short path. The low level driver doesn't care about the image header. */
+ if (!mgr->mops->initial_header_size && !mgr->mops->parse_header)
+ return fpga_mgr_write_init_buf(mgr, info, NULL, 0);
+
+ /*
+	 * First try to use miter to map the first fragment to access the
+	 * header; this is the typical path.
+ */
+ ret = fpga_mgr_parse_header_sg_first(mgr, info, sgt);
+ /* If 0, header fits first fragment, call write_init on it */
+ if (!ret) {
+ sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+ if (sg_miter_next(&miter)) {
+ ret = fpga_mgr_write_init_buf(mgr, info, miter.addr,
+ miter.length);
+ sg_miter_stop(&miter);
+ return ret;
+ }
+ sg_miter_stop(&miter);
+ /*
+ * If -EAGAIN, more sg buffer is needed,
+ * otherwise an error has occurred.
+ */
+ } else if (ret != -EAGAIN) {
+ return ret;
+ }
+
+ /*
+ * Copy the fragments into temporary memory.
+ * Copying is done inside fpga_mgr_parse_header_sg().
+ */
+ buf = fpga_mgr_parse_header_sg(mgr, info, sgt, &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = fpga_mgr_write_init_buf(mgr, info, buf, len);
+
+ kfree(buf);
+
+ return ret;
+}
+
+/**
+ * fpga_mgr_buf_load_sg - load fpga from image in buffer from a scatter list
+ * @mgr: fpga manager
+ * @info: fpga image specific information
+ * @sgt: scatterlist table
+ *
+ * Step the low level fpga manager through the device-specific steps of getting
+ * an FPGA ready to be configured, writing the image to it, then doing whatever
+ * post-configuration steps necessary. This code assumes the caller got the
+ * mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is
+ * not an error code.
+ *
+ * This is the preferred entry point for FPGA programming, it does not require
+ * any contiguous kernel memory.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int fpga_mgr_buf_load_sg(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ struct sg_table *sgt)
+{
+ int ret;
+
+ ret = fpga_mgr_prepare_sg(mgr, info, sgt);
+ if (ret)
+ return ret;
+
+ /* Write the FPGA image to the FPGA. */
+ mgr->state = FPGA_MGR_STATE_WRITE;
+ if (mgr->mops->write_sg) {
+ ret = fpga_mgr_write_sg(mgr, sgt);
+ } else {
+ size_t length, count = 0, data_size = info->data_size;
+ struct sg_mapping_iter miter;
+
+ sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+
+ if (mgr->mops->skip_header &&
+ !sg_miter_skip(&miter, info->header_size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ while (sg_miter_next(&miter)) {
+ if (data_size)
+ length = min(miter.length, data_size - count);
+ else
+ length = miter.length;
+
+ ret = fpga_mgr_write(mgr, miter.addr, length);
+ if (ret)
+ break;
+
+ count += length;
+ if (data_size && count >= data_size)
+ break;
+ }
+ sg_miter_stop(&miter);
+ }
+
+out:
+ if (ret) {
+ dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
+ mgr->state = FPGA_MGR_STATE_WRITE_ERR;
+ return ret;
+ }
+
+ return fpga_mgr_write_complete(mgr, info);
+}
+
+static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ ret = fpga_mgr_parse_header_mapped(mgr, info, buf, count);
+ if (ret)
+ return ret;
+
+ ret = fpga_mgr_write_init_buf(mgr, info, buf, count);
+ if (ret)
+ return ret;
+
+ if (mgr->mops->skip_header) {
+ buf += info->header_size;
+ count -= info->header_size;
+ }
+
+ if (info->data_size)
+ count = info->data_size;
+
+ /*
+ * Write the FPGA image to the FPGA.
+ */
+ mgr->state = FPGA_MGR_STATE_WRITE;
+ ret = fpga_mgr_write(mgr, buf, count);
+ if (ret) {
+ dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
+ mgr->state = FPGA_MGR_STATE_WRITE_ERR;
+ return ret;
+ }
+
+ return fpga_mgr_write_complete(mgr, info);
+}
+
+/**
+ * fpga_mgr_buf_load - load fpga from image in buffer
+ * @mgr: fpga manager
+ * @info: fpga image info
+ * @buf: buffer containing the fpga image
+ * @count: byte count of buf
+ *
+ * Step the low level fpga manager through the device-specific steps of getting
+ * an FPGA ready to be configured, writing the image to it, then doing whatever
+ * post-configuration steps necessary. This code assumes the caller got the
+ * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int fpga_mgr_buf_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct page **pages;
+ struct sg_table sgt;
+ const void *p;
+ int nr_pages;
+ int index;
+ int rc;
+
+ /*
+ * This is just a fast path if the caller has already created a
+	 * contiguous kernel buffer and the driver doesn't require SG; non-SG
+	 * drivers will still work on the slow path.
+ */
+ if (mgr->mops->write)
+ return fpga_mgr_buf_load_mapped(mgr, info, buf, count);
+
+ /*
+ * Convert the linear kernel pointer into a sg_table of pages for use
+ * by the driver.
+ */
+ nr_pages = DIV_ROUND_UP((unsigned long)buf + count, PAGE_SIZE) -
+ (unsigned long)buf / PAGE_SIZE;
+ pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ p = buf - offset_in_page(buf);
+ for (index = 0; index < nr_pages; index++) {
+ if (is_vmalloc_addr(p))
+ pages[index] = vmalloc_to_page(p);
+ else
+ pages[index] = kmap_to_page((void *)p);
+ if (!pages[index]) {
+ kfree(pages);
+ return -EFAULT;
+ }
+ p += PAGE_SIZE;
+ }
+
+ /*
+	 * The temporary pages list is used to share the page-merging algorithm
+	 * in sg_alloc_table_from_pages().
+ */
+ rc = sg_alloc_table_from_pages(&sgt, pages, index, offset_in_page(buf),
+ count, GFP_KERNEL);
+ kfree(pages);
+ if (rc)
+ return rc;
+
+ rc = fpga_mgr_buf_load_sg(mgr, info, &sgt);
+ sg_free_table(&sgt);
+
+ return rc;
+}
+
+/**
+ * fpga_mgr_firmware_load - request firmware and load to fpga
+ * @mgr: fpga manager
+ * @info: fpga image specific information
+ * @image_name: name of image file on the firmware search path
+ *
+ * Request an FPGA image using the firmware class, then write out to the FPGA.
+ * Update the state before each step to provide info on what step failed if
+ * there is a failure. This code assumes the caller got the mgr pointer
+ * from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is not an error
+ * code.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *image_name)
+{
+ struct device *dev = &mgr->dev;
+ const struct firmware *fw;
+ int ret;
+
+ dev_info(dev, "writing %s to %s\n", image_name, mgr->name);
+
+ mgr->state = FPGA_MGR_STATE_FIRMWARE_REQ;
+
+ ret = request_firmware(&fw, image_name, dev);
+ if (ret) {
+ mgr->state = FPGA_MGR_STATE_FIRMWARE_REQ_ERR;
+ dev_err(dev, "Error requesting firmware %s\n", image_name);
+ return ret;
+ }
+
+ ret = fpga_mgr_buf_load(mgr, info, fw->data, fw->size);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+/**
+ * fpga_mgr_load - load FPGA from scatter/gather table, buffer, or firmware
+ * @mgr: fpga manager
+ * @info: fpga image information.
+ *
+ * Load the FPGA from an image which is indicated in @info. If successful, the
+ * FPGA ends up in operating mode.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info)
+{
+ info->header_size = mgr->mops->initial_header_size;
+
+ if (info->sgt)
+ return fpga_mgr_buf_load_sg(mgr, info, info->sgt);
+ if (info->buf && info->count)
+ return fpga_mgr_buf_load(mgr, info, info->buf, info->count);
+ if (info->firmware_name)
+ return fpga_mgr_firmware_load(mgr, info, info->firmware_name);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_load);
+
+static const char * const state_str[] = {
+ [FPGA_MGR_STATE_UNKNOWN] = "unknown",
+ [FPGA_MGR_STATE_POWER_OFF] = "power off",
+ [FPGA_MGR_STATE_POWER_UP] = "power up",
+ [FPGA_MGR_STATE_RESET] = "reset",
+
+ /* requesting FPGA image from firmware */
+ [FPGA_MGR_STATE_FIRMWARE_REQ] = "firmware request",
+ [FPGA_MGR_STATE_FIRMWARE_REQ_ERR] = "firmware request error",
+
+ /* Parse FPGA image header */
+ [FPGA_MGR_STATE_PARSE_HEADER] = "parse header",
+ [FPGA_MGR_STATE_PARSE_HEADER_ERR] = "parse header error",
+
+ /* Preparing FPGA to receive image */
+ [FPGA_MGR_STATE_WRITE_INIT] = "write init",
+ [FPGA_MGR_STATE_WRITE_INIT_ERR] = "write init error",
+
+ /* Writing image to FPGA */
+ [FPGA_MGR_STATE_WRITE] = "write",
+ [FPGA_MGR_STATE_WRITE_ERR] = "write error",
+
+ /* Finishing configuration after image has been written */
+ [FPGA_MGR_STATE_WRITE_COMPLETE] = "write complete",
+ [FPGA_MGR_STATE_WRITE_COMPLETE_ERR] = "write complete error",
+
+ /* FPGA reports to be in normal operating mode */
+ [FPGA_MGR_STATE_OPERATING] = "operating",
+};
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ return sprintf(buf, "%s\n", mgr->name);
+}
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ return sprintf(buf, "%s\n", state_str[mgr->state]);
+}
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ u64 status;
+ int len = 0;
+
+ status = fpga_mgr_status(mgr);
+
+ if (status & FPGA_MGR_STATUS_OPERATION_ERR)
+ len += sprintf(buf + len, "reconfig operation error\n");
+ if (status & FPGA_MGR_STATUS_CRC_ERR)
+ len += sprintf(buf + len, "reconfig CRC error\n");
+ if (status & FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR)
+ len += sprintf(buf + len, "reconfig incompatible image\n");
+ if (status & FPGA_MGR_STATUS_IP_PROTOCOL_ERR)
+ len += sprintf(buf + len, "reconfig IP protocol error\n");
+ if (status & FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR)
+ len += sprintf(buf + len, "reconfig fifo overflow error\n");
+
+ return len;
+}
+
+static DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *fpga_mgr_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_state.attr,
+ &dev_attr_status.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fpga_mgr);
+
+static struct fpga_manager *__fpga_mgr_get(struct device *dev)
+{
+ struct fpga_manager *mgr;
+
+ mgr = to_fpga_manager(dev);
+
+ if (!try_module_get(dev->parent->driver->owner))
+ goto err_dev;
+
+ return mgr;
+
+err_dev:
+ put_device(dev);
+ return ERR_PTR(-ENODEV);
+}
+
+static int fpga_mgr_dev_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+/**
+ * fpga_mgr_get - Given a device, get a reference to an fpga mgr.
+ * @dev: parent device that fpga mgr was registered with
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+struct fpga_manager *fpga_mgr_get(struct device *dev)
+{
+ struct device *mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev,
+ fpga_mgr_dev_match);
+ if (!mgr_dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_mgr_get(mgr_dev);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_get);
+
+/**
+ * of_fpga_mgr_get - Given a device node, get a reference to an fpga mgr.
+ *
+ * @node: device node
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = class_find_device_by_of_node(&fpga_mgr_class, node);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_mgr_get(dev);
+}
+EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
+
+/**
+ * fpga_mgr_put - release a reference to an fpga manager
+ * @mgr: fpga manager structure
+ */
+void fpga_mgr_put(struct fpga_manager *mgr)
+{
+ module_put(mgr->dev.parent->driver->owner);
+ put_device(&mgr->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_put);
+
+/**
+ * fpga_mgr_lock - Lock FPGA manager for exclusive use
+ * @mgr: fpga manager
+ *
+ * Given a pointer to FPGA Manager (from fpga_mgr_get() or
+ * of_fpga_mgr_get()) attempt to get the mutex. The user should call
+ * fpga_mgr_lock() and verify that it returns 0 before attempting to
+ * program the FPGA. Likewise, the user should call fpga_mgr_unlock()
+ * when done programming the FPGA.
+ *
+ * Return: 0 for success or -EBUSY
+ */
+int fpga_mgr_lock(struct fpga_manager *mgr)
+{
+ if (!mutex_trylock(&mgr->ref_mutex)) {
+ dev_err(&mgr->dev, "FPGA manager is in use.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_lock);
+
+/**
+ * fpga_mgr_unlock - Unlock FPGA manager after done programming
+ * @mgr: fpga manager
+ */
+void fpga_mgr_unlock(struct fpga_manager *mgr)
+{
+ mutex_unlock(&mgr->ref_mutex);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
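+
+/*
+ * Illustrative sketch (not part of the driver): a typical consumer brackets
+ * programming with the lock. Here "parent" and "info" are assumed to be a
+ * valid parent device and a prepared struct fpga_image_info.
+ *
+ *	struct fpga_manager *mgr;
+ *	int ret;
+ *
+ *	mgr = fpga_mgr_get(parent);
+ *	if (IS_ERR(mgr))
+ *		return PTR_ERR(mgr);
+ *
+ *	ret = fpga_mgr_lock(mgr);
+ *	if (!ret) {
+ *		ret = fpga_mgr_load(mgr, info);
+ *		fpga_mgr_unlock(mgr);
+ *	}
+ *	fpga_mgr_put(mgr);
+ */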
+
+/**
+ * fpga_mgr_register_full - create and register an FPGA Manager device
+ * @parent: parent device for the FPGA manager
+ * @info: parameters for fpga manager
+ *
+ * The caller of this function is responsible for calling fpga_mgr_unregister().
+ * Using devm_fpga_mgr_register_full() instead is recommended.
+ *
+ * Return: pointer to struct fpga_manager or ERR_PTR()
+ */
+struct fpga_manager *
+fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
+{
+ const struct fpga_manager_ops *mops = info->mops;
+ struct fpga_manager *mgr;
+ int id, ret;
+
+ if (!mops) {
+ dev_err(parent, "Attempt to register without fpga_manager_ops\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!info->name || !strlen(info->name)) {
+ dev_err(parent, "Attempt to register with no name!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_alloc(&fpga_mgr_ida, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto error_kfree;
+ }
+
+ mutex_init(&mgr->ref_mutex);
+
+ mgr->name = info->name;
+ mgr->mops = info->mops;
+ mgr->priv = info->priv;
+ mgr->compat_id = info->compat_id;
+
+ mgr->dev.class = &fpga_mgr_class;
+ mgr->dev.groups = mops->groups;
+ mgr->dev.parent = parent;
+ mgr->dev.of_node = parent->of_node;
+ mgr->dev.id = id;
+
+ ret = dev_set_name(&mgr->dev, "fpga%d", id);
+ if (ret)
+ goto error_device;
+
+	/*
+	 * Initialize framework state by asking the low level driver to read
+	 * the state from the device. The FPGA may be in reset mode or may
+	 * have been programmed by a bootloader or from an EEPROM.
+	 */
+ mgr->state = fpga_mgr_state(mgr);
+
+ ret = device_register(&mgr->dev);
+ if (ret) {
+ put_device(&mgr->dev);
+ return ERR_PTR(ret);
+ }
+
+ return mgr;
+
+error_device:
+ ida_free(&fpga_mgr_ida, id);
+error_kfree:
+ kfree(mgr);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
+
+/**
+ * fpga_mgr_register - create and register an FPGA Manager device
+ * @parent: parent device for the FPGA manager
+ * @name: fpga manager name
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * The caller of this function is responsible for calling fpga_mgr_unregister().
+ * Using devm_fpga_mgr_register() instead is recommended. This simple
+ * version of the register function should be sufficient for most users. The
+ * fpga_mgr_register_full() function is available for users that need to pass
+ * additional, optional parameters.
+ *
+ * Return: pointer to struct fpga_manager or ERR_PTR()
+ */
+struct fpga_manager *
+fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv)
+{
+ struct fpga_manager_info info = { 0 };
+
+ info.name = name;
+ info.mops = mops;
+ info.priv = priv;
+
+ return fpga_mgr_register_full(parent, &info);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_register);
+
+/**
+ * fpga_mgr_unregister - unregister an FPGA manager
+ * @mgr: fpga manager struct
+ *
+ * This function is intended for use in an FPGA manager driver's remove function.
+ */
+void fpga_mgr_unregister(struct fpga_manager *mgr)
+{
+ dev_info(&mgr->dev, "%s %s\n", __func__, mgr->name);
+
+ /*
+ * If the low level driver provides a method for putting fpga into
+ * a desired state upon unregister, do it.
+ */
+ fpga_mgr_fpga_remove(mgr);
+
+ device_unregister(&mgr->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_unregister);
+
+static void devm_fpga_mgr_unregister(struct device *dev, void *res)
+{
+ struct fpga_mgr_devres *dr = res;
+
+ fpga_mgr_unregister(dr->mgr);
+}
+
+/**
+ * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register()
+ * @parent: parent device for the FPGA manager
+ * @info: parameters for fpga manager
+ *
+ * Return: fpga manager pointer on success, ERR_PTR() encoded error code otherwise.
+ *
+ * This is the devres variant of fpga_mgr_register_full() for which the unregister
+ * function will be called automatically when the managing device is detached.
+ */
+struct fpga_manager *
+devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
+{
+ struct fpga_mgr_devres *dr;
+ struct fpga_manager *mgr;
+
+ dr = devres_alloc(devm_fpga_mgr_unregister, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr = fpga_mgr_register_full(parent, info);
+ if (IS_ERR(mgr)) {
+ devres_free(dr);
+ return mgr;
+ }
+
+ dr->mgr = mgr;
+ devres_add(parent, dr);
+
+ return mgr;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
+
+/**
+ * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
+ * @parent: parent device for the FPGA manager
+ * @name: fpga manager name
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * Return: fpga manager pointer on success, ERR_PTR() encoded error code otherwise.
+ *
+ * This is the devres variant of fpga_mgr_register() for which the
+ * unregister function will be called automatically when the managing
+ * device is detached.
+ */
+struct fpga_manager *
+devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv)
+{
+ struct fpga_manager_info info = { 0 };
+
+ info.name = name;
+ info.mops = mops;
+ info.priv = priv;
+
+ return devm_fpga_mgr_register_full(parent, &info);
+}
+EXPORT_SYMBOL_GPL(devm_fpga_mgr_register);
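+
+/*
+ * Illustrative sketch (not part of the driver): a minimal probe function
+ * using the devres variant. "my_fpga_ops" and "priv" are hypothetical
+ * driver objects.
+ *
+ *	static int my_fpga_probe(struct platform_device *pdev)
+ *	{
+ *		struct fpga_manager *mgr;
+ *
+ *		mgr = devm_fpga_mgr_register(&pdev->dev, "My FPGA Manager",
+ *					     &my_fpga_ops, priv);
+ *		return PTR_ERR_OR_ZERO(mgr);
+ *	}
+ */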
+
+static void fpga_mgr_dev_release(struct device *dev)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ ida_free(&fpga_mgr_ida, mgr->dev.id);
+ kfree(mgr);
+}
+
+static const struct class fpga_mgr_class = {
+ .name = "fpga_manager",
+ .dev_groups = fpga_mgr_groups,
+ .dev_release = fpga_mgr_dev_release,
+};
+
+static int __init fpga_mgr_class_init(void)
+{
+ pr_info("FPGA manager framework\n");
+
+ return class_register(&fpga_mgr_class);
+}
+
+static void __exit fpga_mgr_class_exit(void)
+{
+ class_unregister(&fpga_mgr_class);
+ ida_destroy(&fpga_mgr_ida);
+}
+
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
+MODULE_DESCRIPTION("FPGA manager framework");
+MODULE_LICENSE("GPL v2");
+
+subsys_initcall(fpga_mgr_class_init);
+module_exit(fpga_mgr_class_exit);
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
new file mode 100644
index 0000000000..b364a92942
--- /dev/null
+++ b/drivers/fpga/fpga-region.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Region - Support for FPGA programming under Linux
+ *
+ * Copyright (C) 2013-2016 Altera Corporation
+ * Copyright (C) 2017 Intel Corporation
+ */
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+static DEFINE_IDA(fpga_region_ida);
+static const struct class fpga_region_class;
+
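+/**
+ * fpga_region_class_find - find an FPGA region, given a match callback
+ * @start: device to begin searching from, or NULL to search the whole class
+ * @data: opaque data handed to the @match callback
+ * @match: match callback, returns non-zero on the matching device
+ *
+ * The underlying class_find_device() takes a reference on the matched
+ * device, which the caller is expected to drop when done with the region.
+ *
+ * Return: fpga_region struct or NULL if no match was found.
+ */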
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+ int (*match)(struct device *, const void *))
+{
+ struct device *dev;
+
+ dev = class_find_device(&fpga_region_class, start, data, match);
+ if (!dev)
+ return NULL;
+
+ return to_fpga_region(dev);
+}
+EXPORT_SYMBOL_GPL(fpga_region_class_find);
+
+/**
+ * fpga_region_get - get an exclusive reference to an fpga region
+ * @region: FPGA Region struct
+ *
+ * Caller should call fpga_region_put() when done with region.
+ *
+ * Return:
+ * * fpga_region struct if successful.
+ * * -EBUSY if someone already has a reference to the region.
+ * * -ENODEV if the parent driver's module refcount cannot be taken.
+ */
+static struct fpga_region *fpga_region_get(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+
+ if (!mutex_trylock(&region->mutex)) {
+ dev_dbg(dev, "%s: FPGA Region already in use\n", __func__);
+ return ERR_PTR(-EBUSY);
+ }
+
+ get_device(dev);
+ if (!try_module_get(dev->parent->driver->owner)) {
+ put_device(dev);
+ mutex_unlock(&region->mutex);
+ return ERR_PTR(-ENODEV);
+ }
+
+ dev_dbg(dev, "get\n");
+
+ return region;
+}
+
+/**
+ * fpga_region_put - release a reference to a region
+ *
+ * @region: FPGA region
+ */
+static void fpga_region_put(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+
+ dev_dbg(dev, "put\n");
+
+ module_put(dev->parent->driver->owner);
+ put_device(dev);
+ mutex_unlock(&region->mutex);
+}
+
+/**
+ * fpga_region_program_fpga - program FPGA
+ *
+ * @region: FPGA region
+ *
+ * Program an FPGA using fpga image info (region->info).
+ * If the region has a get_bridges function, the exclusive reference for the
+ * bridges will be held if programming succeeds. This is intended to prevent
+ * reprogramming the region until the caller considers it safe to do so.
+ * The caller will need to call fpga_bridges_put() before attempting to
+ * reprogram the region.
+ *
+ * Return: 0 for success or negative error code.
+ */
+int fpga_region_program_fpga(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+ struct fpga_image_info *info = region->info;
+ int ret;
+
+ region = fpga_region_get(region);
+ if (IS_ERR(region)) {
+ dev_err(dev, "failed to get FPGA region\n");
+ return PTR_ERR(region);
+ }
+
+ ret = fpga_mgr_lock(region->mgr);
+ if (ret) {
+ dev_err(dev, "FPGA manager is busy\n");
+ goto err_put_region;
+ }
+
+	/*
+	 * The fpga region struct may already hold a list of bridges, or the
+	 * region may have no bridges at all; only ask for them when the
+	 * region provides a get_bridges() method.
+	 */
+ if (region->get_bridges) {
+ ret = region->get_bridges(region);
+ if (ret) {
+ dev_err(dev, "failed to get fpga region bridges\n");
+ goto err_unlock_mgr;
+ }
+ }
+
+ ret = fpga_bridges_disable(&region->bridge_list);
+ if (ret) {
+ dev_err(dev, "failed to disable bridges\n");
+ goto err_put_br;
+ }
+
+ ret = fpga_mgr_load(region->mgr, info);
+ if (ret) {
+ dev_err(dev, "failed to load FPGA image\n");
+ goto err_put_br;
+ }
+
+ ret = fpga_bridges_enable(&region->bridge_list);
+ if (ret) {
+ dev_err(dev, "failed to enable region bridges\n");
+ goto err_put_br;
+ }
+
+ fpga_mgr_unlock(region->mgr);
+ fpga_region_put(region);
+
+ return 0;
+
+err_put_br:
+ if (region->get_bridges)
+ fpga_bridges_put(&region->bridge_list);
+err_unlock_mgr:
+ fpga_mgr_unlock(region->mgr);
+err_put_region:
+ fpga_region_put(region);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fpga_region_program_fpga);
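+
+/*
+ * Illustrative sketch (not part of the driver): programming a region once an
+ * fpga_image_info has been prepared. "region" and "info" are assumed to be
+ * valid objects obtained elsewhere.
+ *
+ *	int ret;
+ *
+ *	region->info = info;
+ *	ret = fpga_region_program_fpga(region);
+ *	if (ret)
+ *		dev_err(&region->dev, "programming failed: %d\n", ret);
+ */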
+
+static ssize_t compat_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_region *region = to_fpga_region(dev);
+
+ if (!region->compat_id)
+ return -ENOENT;
+
+ return sprintf(buf, "%016llx%016llx\n",
+ (unsigned long long)region->compat_id->id_h,
+ (unsigned long long)region->compat_id->id_l);
+}
+
+static DEVICE_ATTR_RO(compat_id);
+
+static struct attribute *fpga_region_attrs[] = {
+ &dev_attr_compat_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fpga_region);
+
+/**
+ * fpga_region_register_full - create and register an FPGA Region device
+ * @parent: device parent
+ * @info: parameters for FPGA Region
+ *
+ * Return: struct fpga_region or ERR_PTR()
+ */
+struct fpga_region *
+fpga_region_register_full(struct device *parent, const struct fpga_region_info *info)
+{
+ struct fpga_region *region;
+ int id, ret = 0;
+
+ if (!info) {
+ dev_err(parent,
+ "Attempt to register without required info structure\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_alloc(&fpga_region_ida, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto err_free;
+ }
+
+ region->mgr = info->mgr;
+ region->compat_id = info->compat_id;
+ region->priv = info->priv;
+ region->get_bridges = info->get_bridges;
+
+ mutex_init(&region->mutex);
+ INIT_LIST_HEAD(&region->bridge_list);
+
+ region->dev.class = &fpga_region_class;
+ region->dev.parent = parent;
+ region->dev.of_node = parent->of_node;
+ region->dev.id = id;
+
+ ret = dev_set_name(&region->dev, "region%d", id);
+ if (ret)
+ goto err_remove;
+
+ ret = device_register(&region->dev);
+ if (ret) {
+ put_device(&region->dev);
+ return ERR_PTR(ret);
+ }
+
+ return region;
+
+err_remove:
+ ida_free(&fpga_region_ida, id);
+err_free:
+ kfree(region);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(fpga_region_register_full);
+
+/**
+ * fpga_region_register - create and register an FPGA Region device
+ * @parent: device parent
+ * @mgr: manager that programs this region
+ * @get_bridges: optional function to populate the region's list of bridges
+ *
+ * This simple version of the register function should be sufficient for most users.
+ * The fpga_region_register_full() function is available for users that need to
+ * pass additional, optional parameters.
+ *
+ * Return: struct fpga_region or ERR_PTR()
+ */
+struct fpga_region *
+fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *))
+{
+ struct fpga_region_info info = { 0 };
+
+ info.mgr = mgr;
+ info.get_bridges = get_bridges;
+
+ return fpga_region_register_full(parent, &info);
+}
+EXPORT_SYMBOL_GPL(fpga_region_register);
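+
+/*
+ * Illustrative sketch (not part of the driver): registering a region from a
+ * driver that already holds an FPGA manager. "mgr" and "my_get_bridges" are
+ * hypothetical.
+ *
+ *	region = fpga_region_register(&pdev->dev, mgr, my_get_bridges);
+ *	if (IS_ERR(region))
+ *		return PTR_ERR(region);
+ */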
+
+/**
+ * fpga_region_unregister - unregister an FPGA region
+ * @region: FPGA region
+ *
+ * This function is intended for use in an FPGA region driver's remove function.
+ */
+void fpga_region_unregister(struct fpga_region *region)
+{
+ device_unregister(&region->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_region_unregister);
+
+static void fpga_region_dev_release(struct device *dev)
+{
+ struct fpga_region *region = to_fpga_region(dev);
+
+ ida_free(&fpga_region_ida, region->dev.id);
+ kfree(region);
+}
+
+static const struct class fpga_region_class = {
+ .name = "fpga_region",
+ .dev_groups = fpga_region_groups,
+ .dev_release = fpga_region_dev_release,
+};
+
+/**
+ * fpga_region_init - creates the fpga_region class.
+ *
+ * Return: 0 on success or a negative error code on error.
+ */
+static int __init fpga_region_init(void)
+{
+ return class_register(&fpga_region_class);
+}
+
+static void __exit fpga_region_exit(void)
+{
+ class_unregister(&fpga_region_class);
+ ida_destroy(&fpga_region_ida);
+}
+
+subsys_initcall(fpga_region_init);
+module_exit(fpga_region_exit);
+
+MODULE_DESCRIPTION("FPGA Region");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
new file mode 100644
index 0000000000..7cbb3558b8
--- /dev/null
+++ b/drivers/fpga/ice40-spi.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * FPGA Manager Driver for Lattice iCE40.
+ *
+ * Copyright (c) 2016 Joel Holdsworth
+ *
+ * This driver adds support to the FPGA manager for configuring the SRAM of
+ * Lattice iCE40 FPGAs through slave SPI.
+ */
+
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/stringify.h>
+
+#define ICE40_SPI_MAX_SPEED 25000000 /* Hz */
+#define ICE40_SPI_MIN_SPEED 1000000 /* Hz */
+
+#define ICE40_SPI_RESET_DELAY 1 /* us (>200ns) */
+#define ICE40_SPI_HOUSEKEEPING_DELAY 1200 /* us */
+
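+/*
+ * The device expects a tail of dummy SPI clocks after CDONE asserts before
+ * it enters user mode; DIV_ROUND_UP(49, 8) rounds those 49 bits up to the
+ * 7 zero-padding bytes sent in ice40_fpga_ops_write_complete().
+ */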
+#define ICE40_SPI_NUM_ACTIVATION_BYTES DIV_ROUND_UP(49, 8)
+
+struct ice40_fpga_priv {
+ struct spi_device *dev;
+ struct gpio_desc *reset;
+ struct gpio_desc *cdone;
+};
+
+static enum fpga_mgr_states ice40_fpga_ops_state(struct fpga_manager *mgr)
+{
+ struct ice40_fpga_priv *priv = mgr->priv;
+
+ return gpiod_get_value(priv->cdone) ? FPGA_MGR_STATE_OPERATING :
+ FPGA_MGR_STATE_UNKNOWN;
+}
+
+static int ice40_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct ice40_fpga_priv *priv = mgr->priv;
+ struct spi_device *dev = priv->dev;
+ struct spi_message message;
+ struct spi_transfer assert_cs_then_reset_delay = {
+ .cs_change = 1,
+ .delay = {
+ .value = ICE40_SPI_RESET_DELAY,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
+ };
+ struct spi_transfer housekeeping_delay_then_release_cs = {
+ .delay = {
+ .value = ICE40_SPI_HOUSEKEEPING_DELAY,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
+ };
+ int ret;
+
+	if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+		dev_err(&dev->dev,
+			"Partial reconfiguration is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+ /* Lock the bus, assert CRESET_B and SS_B and delay >200ns */
+ spi_bus_lock(dev->master);
+
+ gpiod_set_value(priv->reset, 1);
+
+ spi_message_init(&message);
+ spi_message_add_tail(&assert_cs_then_reset_delay, &message);
+ ret = spi_sync_locked(dev, &message);
+
+ /* Come out of reset */
+ gpiod_set_value(priv->reset, 0);
+
+ /* Abort if the chip-select failed */
+ if (ret)
+ goto fail;
+
+	/* Check that CDONE is de-asserted, i.e. the FPGA is in reset */
+ if (gpiod_get_value(priv->cdone)) {
+ dev_err(&dev->dev, "Device reset failed, CDONE is asserted\n");
+ ret = -EIO;
+ goto fail;
+ }
+
+ /* Wait for the housekeeping to complete, and release SS_B */
+ spi_message_init(&message);
+ spi_message_add_tail(&housekeeping_delay_then_release_cs, &message);
+ ret = spi_sync_locked(dev, &message);
+
+fail:
+ spi_bus_unlock(dev->master);
+
+ return ret;
+}
+
+static int ice40_fpga_ops_write(struct fpga_manager *mgr,
+ const char *buf, size_t count)
+{
+ struct ice40_fpga_priv *priv = mgr->priv;
+
+ return spi_write(priv->dev, buf, count);
+}
+
+static int ice40_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct ice40_fpga_priv *priv = mgr->priv;
+ struct spi_device *dev = priv->dev;
+ const u8 padding[ICE40_SPI_NUM_ACTIVATION_BYTES] = {0};
+
+ /* Check CDONE is asserted */
+ if (!gpiod_get_value(priv->cdone)) {
+ dev_err(&dev->dev,
+ "CDONE was not asserted after firmware transfer\n");
+ return -EIO;
+ }
+
+	/* Send zero-padding to activate the firmware */
+ return spi_write(dev, padding, sizeof(padding));
+}
+
+static const struct fpga_manager_ops ice40_fpga_ops = {
+ .state = ice40_fpga_ops_state,
+ .write_init = ice40_fpga_ops_write_init,
+ .write = ice40_fpga_ops_write,
+ .write_complete = ice40_fpga_ops_write_complete,
+};
+
+static int ice40_fpga_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ice40_fpga_priv *priv;
+ struct fpga_manager *mgr;
+ int ret;
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = spi;
+
+ /* Check board setup data. */
+ if (spi->max_speed_hz > ICE40_SPI_MAX_SPEED) {
+ dev_err(dev, "SPI speed is too high, maximum speed is "
+ __stringify(ICE40_SPI_MAX_SPEED) "\n");
+ return -EINVAL;
+ }
+
+ if (spi->max_speed_hz < ICE40_SPI_MIN_SPEED) {
+ dev_err(dev, "SPI speed is too low, minimum speed is "
+ __stringify(ICE40_SPI_MIN_SPEED) "\n");
+ return -EINVAL;
+ }
+
+ if (spi->mode & SPI_CPHA) {
+ dev_err(dev, "Bad SPI mode, CPHA not supported\n");
+ return -EINVAL;
+ }
+
+ /* Set up the GPIOs */
+ priv->cdone = devm_gpiod_get(dev, "cdone", GPIOD_IN);
+ if (IS_ERR(priv->cdone)) {
+ ret = PTR_ERR(priv->cdone);
+ dev_err(dev, "Failed to get CDONE GPIO: %d\n", ret);
+ return ret;
+ }
+
+ priv->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ dev_err(dev, "Failed to get CRESET_B GPIO: %d\n", ret);
+ return ret;
+ }
+
+ mgr = devm_fpga_mgr_register(dev, "Lattice iCE40 FPGA Manager",
+ &ice40_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+static const struct of_device_id ice40_fpga_of_match[] = {
+ { .compatible = "lattice,ice40-fpga-mgr", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ice40_fpga_of_match);
+
+static const struct spi_device_id ice40_fpga_spi_ids[] = {
+ { .name = "ice40-fpga-mgr", },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, ice40_fpga_spi_ids);
+
+static struct spi_driver ice40_fpga_driver = {
+ .probe = ice40_fpga_probe,
+ .driver = {
+ .name = "ice40spi",
+ .of_match_table = of_match_ptr(ice40_fpga_of_match),
+ },
+ .id_table = ice40_fpga_spi_ids,
+};
+
+module_spi_driver(ice40_fpga_driver);
+
+MODULE_AUTHOR("Joel Holdsworth <joel@airwebreathe.org.uk>");
+MODULE_DESCRIPTION("Lattice iCE40 FPGA Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
new file mode 100644
index 0000000000..31af2e08c8
--- /dev/null
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel MAX10 Board Management Controller Secure Update Driver
+ *
+ * Copyright (C) 2019-2022 Intel Corporation. All rights reserved.
+ *
+ */
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/mfd/intel-m10-bmc.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct m10bmc_sec;
+
+struct m10bmc_sec_ops {
+ int (*rsu_status)(struct m10bmc_sec *sec);
+};
+
+struct m10bmc_sec {
+ struct device *dev;
+ struct intel_m10bmc *m10bmc;
+ struct fw_upload *fwl;
+ char *fw_name;
+ u32 fw_name_id;
+ bool cancel_request;
+ const struct m10bmc_sec_ops *ops;
+};
+
+static DEFINE_XARRAY_ALLOC(fw_upload_xa);
+
+/* Root Entry Hash (REH) support */
+#define REH_SHA256_SIZE 32
+#define REH_SHA384_SIZE 48
+#define REH_MAGIC GENMASK(15, 0)
+#define REH_SHA_NUM_BYTES GENMASK(31, 16)
+
+static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size)
+{
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
+ u32 write_count = size / stride;
+ u32 leftover_offset = write_count * stride;
+ u32 leftover_size = size - leftover_offset;
+ u32 leftover_tmp = 0;
+ int ret;
+
+ if (sec->m10bmc->flash_bulk_ops)
+ return sec->m10bmc->flash_bulk_ops->write(m10bmc, buf, offset, size);
+
+ if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
+ return -EINVAL;
+
+ ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset,
+ buf + offset, write_count);
+ if (ret)
+ return ret;
+
+ /* If size is not aligned to stride, handle the remainder bytes with regmap_write() */
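+	/*
+	 * For example, with a 4-byte register stride and size = 10:
+	 * write_count = 2, leftover_offset = 8 and leftover_size = 2, so the
+	 * final two bytes are copied into the zero-initialized leftover_tmp
+	 * and written as one last register access.
+	 */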
+ if (leftover_size) {
+ memcpy(&leftover_tmp, buf + leftover_offset, leftover_size);
+ ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset,
+ leftover_tmp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size)
+{
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
+ u32 read_count = size / stride;
+ u32 leftover_offset = read_count * stride;
+ u32 leftover_size = size - leftover_offset;
+ u32 leftover_tmp;
+ int ret;
+
+ if (sec->m10bmc->flash_bulk_ops)
+ return sec->m10bmc->flash_bulk_ops->read(m10bmc, buf, addr, size);
+
+ if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
+ return -EINVAL;
+
+ ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count);
+ if (ret)
+ return ret;
+
+ /* If size is not aligned to stride, handle the remainder bytes with regmap_read() */
+ if (leftover_size) {
+ ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp);
+ if (ret)
+ return ret;
+ memcpy(buf + leftover_offset, &leftover_tmp, leftover_size);
+ }
+
+ return 0;
+}
+
+static ssize_t
+show_root_entry_hash(struct device *dev, u32 exp_magic,
+ u32 prog_addr, u32 reh_addr, char *buf)
+{
+ struct m10bmc_sec *sec = dev_get_drvdata(dev);
+ int sha_num_bytes, i, ret, cnt = 0;
+ u8 hash[REH_SHA384_SIZE];
+ u32 magic;
+
+ ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic));
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(REH_MAGIC, magic) != exp_magic)
+ return sysfs_emit(buf, "hash not programmed\n");
+
+ sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
+ if (sha_num_bytes != REH_SHA256_SIZE &&
+ sha_num_bytes != REH_SHA384_SIZE) {
+ dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
+ sha_num_bytes);
+ return -EINVAL;
+ }
+
+ ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes);
+ if (ret) {
+ dev_err(dev, "failed to read root entry hash\n");
+ return ret;
+ }
+
+ for (i = 0; i < sha_num_bytes; i++)
+ cnt += sprintf(buf + cnt, "%02x", hash[i]);
+ cnt += sprintf(buf + cnt, "\n");
+
+ return cnt;
+}
+
+#define DEVICE_ATTR_SEC_REH_RO(_name) \
+static ssize_t _name##_root_entry_hash_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct m10bmc_sec *sec = dev_get_drvdata(dev); \
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
+ \
+ return show_root_entry_hash(dev, csr_map->_name##_magic, \
+ csr_map->_name##_prog_addr, \
+ csr_map->_name##_reh_addr, \
+ buf); \
+} \
+static DEVICE_ATTR_RO(_name##_root_entry_hash)
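+
+/*
+ * For example, DEVICE_ATTR_SEC_REH_RO(bmc) expands to
+ * bmc_root_entry_hash_show(), which reads csr_map->bmc_magic,
+ * csr_map->bmc_prog_addr and csr_map->bmc_reh_addr.
+ */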
+
+DEVICE_ATTR_SEC_REH_RO(bmc);
+DEVICE_ATTR_SEC_REH_RO(sr);
+DEVICE_ATTR_SEC_REH_RO(pr);
+
+#define CSK_BIT_LEN 128U
+#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32)
+
+static ssize_t
+show_canceled_csk(struct device *dev, u32 addr, char *buf)
+{
+ unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32);
+ struct m10bmc_sec *sec = dev_get_drvdata(dev);
+ DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
+ __le32 csk_le32[CSK_32ARRAY_SIZE];
+ u32 csk32[CSK_32ARRAY_SIZE];
+ int ret;
+
+ ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size);
+ if (ret) {
+ dev_err(sec->dev, "failed to read CSK vector\n");
+ return ret;
+ }
+
+ for (i = 0; i < CSK_32ARRAY_SIZE; i++)
+		csk32[i] = le32_to_cpu(csk_le32[i]);
+
+ bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN);
+ bitmap_complement(csk_map, csk_map, CSK_BIT_LEN);
+ return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
+}
+
+#define DEVICE_ATTR_SEC_CSK_RO(_name) \
+static ssize_t _name##_canceled_csks_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct m10bmc_sec *sec = dev_get_drvdata(dev); \
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
+ \
+ return show_canceled_csk(dev, \
+ csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
+ buf); \
+} \
+static DEVICE_ATTR_RO(_name##_canceled_csks)
+
+#define CSK_VEC_OFFSET 0x34
+
+DEVICE_ATTR_SEC_CSK_RO(bmc);
+DEVICE_ATTR_SEC_CSK_RO(sr);
+DEVICE_ATTR_SEC_CSK_RO(pr);
+
+#define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */
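+/*
+ * Each successful update is recorded by clearing one more bit of the
+ * (initially all-ones) vector, so flash_count_show() reports the number of
+ * zero bits: a vector with three bits cleared reads back as a count of 3.
+ */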
+
+static ssize_t flash_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct m10bmc_sec *sec = dev_get_drvdata(dev);
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ unsigned int num_bits;
+ u8 *flash_buf;
+ int cnt, ret;
+
+ num_bits = FLASH_COUNT_SIZE * 8;
+
+ flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
+ if (!flash_buf)
+ return -ENOMEM;
+
+ ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter,
+ FLASH_COUNT_SIZE);
+ if (ret) {
+ dev_err(sec->dev, "failed to read flash count\n");
+ goto exit_free;
+ }
+ cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);
+
+exit_free:
+ kfree(flash_buf);
+
+ return ret ? : sysfs_emit(buf, "%u\n", cnt);
+}
+static DEVICE_ATTR_RO(flash_count);
+
+static struct attribute *m10bmc_security_attrs[] = {
+ &dev_attr_flash_count.attr,
+ &dev_attr_bmc_root_entry_hash.attr,
+ &dev_attr_sr_root_entry_hash.attr,
+ &dev_attr_pr_root_entry_hash.attr,
+ &dev_attr_sr_canceled_csks.attr,
+ &dev_attr_pr_canceled_csks.attr,
+ &dev_attr_bmc_canceled_csks.attr,
+ NULL,
+};
+
+static struct attribute_group m10bmc_security_attr_group = {
+ .name = "security",
+ .attrs = m10bmc_security_attrs,
+};
+
+static const struct attribute_group *m10bmc_sec_attr_groups[] = {
+ &m10bmc_security_attr_group,
+ NULL,
+};
+
+static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 auth_result;
+
+ dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell);
+
+ if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result))
+ dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
+}
+
+static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(DRBL_RSU_STATUS, doorbell);
+}
+
+static int m10bmc_sec_n6000_rsu_status(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 auth_result;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(AUTH_RESULT_RSU_STATUS, auth_result);
+}
+
+static bool rsu_status_ok(u32 status)
+{
+ return (status == RSU_STAT_NORMAL ||
+ status == RSU_STAT_NIOS_OK ||
+ status == RSU_STAT_USER_OK ||
+ status == RSU_STAT_FACTORY_OK);
+}
+
+static bool rsu_progress_done(u32 progress)
+{
+ return (progress == RSU_PROG_IDLE ||
+ progress == RSU_PROG_RSU_DONE);
+}
+
+static bool rsu_progress_busy(u32 progress)
+{
+ return (progress == RSU_PROG_AUTHENTICATING ||
+ progress == RSU_PROG_COPYING ||
+ progress == RSU_PROG_UPDATE_CANCEL ||
+ progress == RSU_PROG_PROGRAM_KEY_HASH);
+}
+
+static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
+ u32 *progress, u32 *status)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
+ if (ret)
+ return ret;
+
+ ret = sec->ops->rsu_status(sec);
+ if (ret < 0)
+ return ret;
+
+ *status = ret;
+ *progress = rsu_prog(*doorbell_reg);
+
+ return 0;
+}
+
+static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ if (!rsu_progress_done(rsu_prog(doorbell))) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_BUSY;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status)
+{
+ if (doorbell_reg & DRBL_RSU_REQUEST)
+ return false;
+
+ if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
+ return true;
+
+ if (!rsu_progress_done(progress))
+ return true;
+
+ return false;
+}
+
+static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell_reg, progress, status;
+ int ret, err;
+
+ ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
+ DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
+ DRBL_RSU_REQUEST |
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_IDLE));
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ ret = read_poll_timeout(m10bmc_sec_progress_status, err,
+ err < 0 || rsu_start_done(doorbell_reg, progress, status),
+ NIOS_HANDSHAKE_INTERVAL_US,
+ NIOS_HANDSHAKE_TIMEOUT_US,
+ false,
+ sec, &doorbell_reg, &progress, &status);
+
+ if (ret == -ETIMEDOUT) {
+ log_error_regs(sec, doorbell_reg);
+ return FW_UPLOAD_ERR_TIMEOUT;
+ } else if (err) {
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ if (status == RSU_STAT_WEAROUT) {
+ dev_warn(sec->dev, "Excessive flash update count detected\n");
+ return FW_UPLOAD_ERR_WEAROUT;
+ } else if (status == RSU_STAT_ERASE_FAIL) {
+ log_error_regs(sec, doorbell_reg);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ unsigned long poll_timeout;
+ u32 doorbell, progress;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS);
+ while (rsu_prog(doorbell) == RSU_PROG_PREPARE) {
+ msleep(RSU_PREP_INTERVAL_MS);
+ if (time_after(jiffies, poll_timeout))
+ break;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ progress = rsu_prog(doorbell);
+ if (progress == RSU_PROG_PREPARE) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_TIMEOUT;
+ } else if (progress != RSU_PROG_READY) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell_reg, status;
+ int ret;
+
+ ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
+ DRBL_HOST_STATUS,
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_WRITE_DONE));
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
+ csr_map->base + csr_map->doorbell,
+ doorbell_reg,
+ rsu_prog(doorbell_reg) != RSU_PROG_READY,
+ NIOS_HANDSHAKE_INTERVAL_US,
+ NIOS_HANDSHAKE_TIMEOUT_US);
+
+ if (ret == -ETIMEDOUT) {
+ log_error_regs(sec, doorbell_reg);
+ return FW_UPLOAD_ERR_TIMEOUT;
+ } else if (ret) {
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ ret = sec->ops->rsu_status(sec);
+ if (ret < 0)
+ return FW_UPLOAD_ERR_HW_ERROR;
+ status = ret;
+
+ if (!rsu_status_ok(status)) {
+ log_error_regs(sec, doorbell_reg);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg)
+{
+ u32 progress, status;
+
+ if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status))
+ return -EIO;
+
+ if (!rsu_status_ok(status))
+ return -EINVAL;
+
+ if (rsu_progress_done(progress))
+ return 0;
+
+ if (rsu_progress_busy(progress))
+ return -EAGAIN;
+
+ return -EINVAL;
+}
+
+static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ if (rsu_prog(doorbell) != RSU_PROG_READY)
+ return FW_UPLOAD_ERR_BUSY;
+
+ ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
+ DRBL_HOST_STATUS,
+ FIELD_PREP(DRBL_HOST_STATUS,
+ HOST_STATUS_ABORT_RSU));
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ return FW_UPLOAD_ERR_CANCELED;
+}
+
+static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
+ const u8 *data, u32 size)
+{
+ struct m10bmc_sec *sec = fwl->dd_handle;
+ u32 ret;
+
+ sec->cancel_request = false;
+
+ if (!size || size > M10BMC_STAGING_SIZE)
+ return FW_UPLOAD_ERR_INVALID_SIZE;
+
+ if (sec->m10bmc->flash_bulk_ops)
+ if (sec->m10bmc->flash_bulk_ops->lock_write(sec->m10bmc))
+ return FW_UPLOAD_ERR_BUSY;
+
+ ret = rsu_check_idle(sec);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ goto unlock_flash;
+
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PREPARE);
+
+ ret = rsu_update_init(sec);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ goto fw_state_exit;
+
+ ret = rsu_prog_ready(sec);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ goto fw_state_exit;
+
+ if (sec->cancel_request) {
+ ret = rsu_cancel(sec);
+ goto fw_state_exit;
+ }
+
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_WRITE);
+
+ return FW_UPLOAD_ERR_NONE;
+
+fw_state_exit:
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL);
+
+unlock_flash:
+ if (sec->m10bmc->flash_bulk_ops)
+ sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
+ return ret;
+}
+
+#define WRITE_BLOCK_SIZE 0x4000 /* Default write-block size is 0x4000 bytes */
+
+static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
+ u32 offset, u32 size, u32 *written)
+{
+ struct m10bmc_sec *sec = fwl->dd_handle;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ u32 blk_size, doorbell;
+ int ret;
+
+ if (sec->cancel_request)
+ return rsu_cancel(sec);
+
+ ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
+ if (ret) {
+ return FW_UPLOAD_ERR_RW_ERROR;
+ } else if (rsu_prog(doorbell) != RSU_PROG_READY) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
+ blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
+ ret = m10bmc_sec_write(sec, data, offset, blk_size);
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ *written = blk_size;
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl)
+{
+ struct m10bmc_sec *sec = fwl->dd_handle;
+ unsigned long poll_timeout;
+ u32 doorbell, result;
+ int ret;
+
+ if (sec->cancel_request)
+ return rsu_cancel(sec);
+
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PROGRAM);
+
+ result = rsu_send_data(sec);
+ if (result != FW_UPLOAD_ERR_NONE)
+ return result;
+
+ poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS);
+ do {
+ msleep(RSU_COMPLETE_INTERVAL_MS);
+ ret = rsu_check_complete(sec, &doorbell);
+ } while (ret == -EAGAIN && !time_after(jiffies, poll_timeout));
+
+ if (ret == -EAGAIN) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_TIMEOUT;
+ } else if (ret == -EIO) {
+ return FW_UPLOAD_ERR_RW_ERROR;
+ } else if (ret) {
+ log_error_regs(sec, doorbell);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+/*
+ * m10bmc_sec_cancel() may be called asynchronously while an update is in progress.
+ * All other functions are called sequentially in a single thread. To avoid
+ * contention on register accesses, m10bmc_sec_cancel() must only update
+ * the cancel_request flag. Other functions will check this flag and handle
+ * the cancel request synchronously.
+ */
+static void m10bmc_sec_cancel(struct fw_upload *fwl)
+{
+ struct m10bmc_sec *sec = fwl->dd_handle;
+
+ sec->cancel_request = true;
+}
+
+static void m10bmc_sec_cleanup(struct fw_upload *fwl)
+{
+ struct m10bmc_sec *sec = fwl->dd_handle;
+
+ (void)rsu_cancel(sec);
+
+ m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL);
+
+ if (sec->m10bmc->flash_bulk_ops)
+ sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
+}
+
+static const struct fw_upload_ops m10bmc_ops = {
+ .prepare = m10bmc_sec_prepare,
+ .write = m10bmc_sec_fw_write,
+ .poll_complete = m10bmc_sec_poll_complete,
+ .cancel = m10bmc_sec_cancel,
+ .cleanup = m10bmc_sec_cleanup,
+};
+
+static const struct m10bmc_sec_ops m10sec_n3000_ops = {
+ .rsu_status = m10bmc_sec_n3000_rsu_status,
+};
+
+static const struct m10bmc_sec_ops m10sec_n6000_ops = {
+ .rsu_status = m10bmc_sec_n6000_rsu_status,
+};
+
+#define SEC_UPDATE_LEN_MAX 32
+static int m10bmc_sec_probe(struct platform_device *pdev)
+{
+ char buf[SEC_UPDATE_LEN_MAX];
+ struct m10bmc_sec *sec;
+ struct fw_upload *fwl;
+ unsigned int len;
+ int ret;
+
+ sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+
+ sec->dev = &pdev->dev;
+ sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
+ sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
+ dev_set_drvdata(&pdev->dev, sec);
+
+ ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
+ xa_limit_32b, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
+ sec->fw_name_id);
+ sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
+ if (!sec->fw_name) {
+ ret = -ENOMEM;
+ goto fw_name_fail;
+ }
+
+ fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
+ &m10bmc_ops, sec);
+ if (IS_ERR(fwl)) {
+ dev_err(sec->dev, "Firmware Upload driver failed to start\n");
+ ret = PTR_ERR(fwl);
+ goto fw_uploader_fail;
+ }
+
+ sec->fwl = fwl;
+ return 0;
+
+fw_uploader_fail:
+ kfree(sec->fw_name);
+fw_name_fail:
+ xa_erase(&fw_upload_xa, sec->fw_name_id);
+ return ret;
+}
+
+static int m10bmc_sec_remove(struct platform_device *pdev)
+{
+ struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);
+
+ firmware_upload_unregister(sec->fwl);
+ kfree(sec->fw_name);
+ xa_erase(&fw_upload_xa, sec->fw_name_id);
+
+ return 0;
+}
+
+static const struct platform_device_id intel_m10bmc_sec_ids[] = {
+ {
+ .name = "n3000bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
+ },
+ {
+ .name = "d5005bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
+ },
+ {
+ .name = "n6000bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n6000_ops,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
+
+static struct platform_driver intel_m10bmc_sec_driver = {
+ .probe = m10bmc_sec_probe,
+ .remove = m10bmc_sec_remove,
+ .driver = {
+ .name = "intel-m10bmc-sec-update",
+ .dev_groups = m10bmc_sec_attr_groups,
+ },
+ .id_table = intel_m10bmc_sec_ids,
+};
+module_platform_driver(intel_m10bmc_sec_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);
diff --git a/drivers/fpga/lattice-sysconfig-spi.c b/drivers/fpga/lattice-sysconfig-spi.c
new file mode 100644
index 0000000000..44691cfcf5
--- /dev/null
+++ b/drivers/fpga/lattice-sysconfig-spi.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lattice FPGA programming over slave SPI sysCONFIG interface.
+ */
+
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#include "lattice-sysconfig.h"
+
+static const u32 ecp5_spi_max_speed_hz = 60000000;
+
+static int sysconfig_spi_cmd_transfer(struct sysconfig_priv *priv,
+ const void *tx_buf, size_t tx_len,
+ void *rx_buf, size_t rx_len)
+{
+ struct spi_device *spi = to_spi_device(priv->dev);
+
+ return spi_write_then_read(spi, tx_buf, tx_len, rx_buf, rx_len);
+}
+
+static int sysconfig_spi_bitstream_burst_init(struct sysconfig_priv *priv)
+{
+ const u8 lsc_bitstream_burst[] = SYSCONFIG_LSC_BITSTREAM_BURST;
+ struct spi_device *spi = to_spi_device(priv->dev);
+ struct spi_transfer xfer = {};
+ struct spi_message msg;
+ size_t buf_len;
+ void *buf;
+ int ret;
+
+ buf_len = sizeof(lsc_bitstream_burst);
+
+ buf = kmemdup(lsc_bitstream_burst, buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ xfer.len = buf_len;
+ xfer.tx_buf = buf;
+ xfer.cs_change = 1;
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ /*
+ * Lock SPI bus for exclusive usage until FPGA programming is done.
+ * SPI bus will be released in sysconfig_spi_bitstream_burst_complete().
+ */
+ spi_bus_lock(spi->controller);
+
+ ret = spi_sync_locked(spi, &msg);
+ if (ret)
+ spi_bus_unlock(spi->controller);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int sysconfig_spi_bitstream_burst_write(struct sysconfig_priv *priv,
+ const char *buf, size_t len)
+{
+ struct spi_device *spi = to_spi_device(priv->dev);
+ struct spi_transfer xfer = {
+ .tx_buf = buf,
+ .len = len,
+ .cs_change = 1,
+ };
+ struct spi_message msg;
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ return spi_sync_locked(spi, &msg);
+}
+
+static int sysconfig_spi_bitstream_burst_complete(struct sysconfig_priv *priv)
+{
+ struct spi_device *spi = to_spi_device(priv->dev);
+
+ /* Bitstream burst write is done, release SPI bus */
+ spi_bus_unlock(spi->controller);
+
+ /* Toggle CS to finish bitstream write */
+ return spi_write(spi, NULL, 0);
+}
+
+static int sysconfig_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *dev_id;
+ struct device *dev = &spi->dev;
+ struct sysconfig_priv *priv;
+ const u32 *spi_max_speed;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spi_max_speed = device_get_match_data(dev);
+ if (!spi_max_speed) {
+ dev_id = spi_get_device_id(spi);
+ if (!dev_id)
+ return -ENODEV;
+
+ spi_max_speed = (const u32 *)dev_id->driver_data;
+ }
+
+ if (!spi_max_speed)
+ return -EINVAL;
+
+ if (spi->max_speed_hz > *spi_max_speed) {
+ dev_err(dev, "SPI speed %u is too high, maximum speed is %u\n",
+ spi->max_speed_hz, *spi_max_speed);
+ return -EINVAL;
+ }
+
+ priv->dev = dev;
+ priv->command_transfer = sysconfig_spi_cmd_transfer;
+ priv->bitstream_burst_write_init = sysconfig_spi_bitstream_burst_init;
+ priv->bitstream_burst_write = sysconfig_spi_bitstream_burst_write;
+ priv->bitstream_burst_write_complete = sysconfig_spi_bitstream_burst_complete;
+
+ return sysconfig_probe(priv);
+}
+
+static const struct spi_device_id sysconfig_spi_ids[] = {
+ {
+ .name = "sysconfig-ecp5",
+ .driver_data = (kernel_ulong_t)&ecp5_spi_max_speed_hz,
+ }, {},
+};
+MODULE_DEVICE_TABLE(spi, sysconfig_spi_ids);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id sysconfig_of_ids[] = {
+ {
+ .compatible = "lattice,sysconfig-ecp5",
+ .data = &ecp5_spi_max_speed_hz,
+ }, {},
+};
+MODULE_DEVICE_TABLE(of, sysconfig_of_ids);
+#endif /* IS_ENABLED(CONFIG_OF) */
+
+static struct spi_driver lattice_sysconfig_driver = {
+ .probe = sysconfig_spi_probe,
+ .id_table = sysconfig_spi_ids,
+ .driver = {
+ .name = "lattice_sysconfig_spi_fpga_mgr",
+ .of_match_table = of_match_ptr(sysconfig_of_ids),
+ },
+};
+module_spi_driver(lattice_sysconfig_driver);
+
+MODULE_DESCRIPTION("Lattice sysCONFIG Slave SPI FPGA Manager");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/lattice-sysconfig.c b/drivers/fpga/lattice-sysconfig.c
new file mode 100644
index 0000000000..ba51a60f67
--- /dev/null
+++ b/drivers/fpga/lattice-sysconfig.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lattice FPGA sysCONFIG interface functions independent of port type.
+ */
+
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+
+#include "lattice-sysconfig.h"
+
+static int sysconfig_cmd_write(struct sysconfig_priv *priv, const void *buf,
+ size_t buf_len)
+{
+ return priv->command_transfer(priv, buf, buf_len, NULL, 0);
+}
+
+static int sysconfig_cmd_read(struct sysconfig_priv *priv, const void *tx_buf,
+ size_t tx_len, void *rx_buf, size_t rx_len)
+{
+ return priv->command_transfer(priv, tx_buf, tx_len, rx_buf, rx_len);
+}
+
+static int sysconfig_read_busy(struct sysconfig_priv *priv)
+{
+ const u8 lsc_check_busy[] = SYSCONFIG_LSC_CHECK_BUSY;
+ u8 busy;
+ int ret;
+
+ ret = sysconfig_cmd_read(priv, lsc_check_busy, sizeof(lsc_check_busy),
+ &busy, sizeof(busy));
+
+ return ret ? : busy;
+}
+
+static int sysconfig_poll_busy(struct sysconfig_priv *priv)
+{
+ int ret, busy;
+
+ ret = read_poll_timeout(sysconfig_read_busy, busy, busy <= 0,
+ SYSCONFIG_POLL_INTERVAL_US,
+ SYSCONFIG_POLL_BUSY_TIMEOUT_US, false, priv);
+
+ return ret ? : busy;
+}
+
+static int sysconfig_read_status(struct sysconfig_priv *priv, u32 *status)
+{
+ const u8 lsc_read_status[] = SYSCONFIG_LSC_READ_STATUS;
+ __be32 device_status;
+ int ret;
+
+ ret = sysconfig_cmd_read(priv, lsc_read_status, sizeof(lsc_read_status),
+ &device_status, sizeof(device_status));
+ if (ret)
+ return ret;
+
+ *status = be32_to_cpu(device_status);
+
+ return 0;
+}
+
+static int sysconfig_poll_status(struct sysconfig_priv *priv, u32 *status)
+{
+ int ret = sysconfig_poll_busy(priv);
+
+ if (ret)
+ return ret;
+
+ return sysconfig_read_status(priv, status);
+}
+
+static int sysconfig_poll_gpio(struct gpio_desc *gpio, bool is_active)
+{
+ int ret, val;
+
+ ret = read_poll_timeout(gpiod_get_value, val,
+ val < 0 || !!val == is_active,
+ SYSCONFIG_POLL_INTERVAL_US,
+ SYSCONFIG_POLL_GPIO_TIMEOUT_US, false, gpio);
+
+ if (val < 0)
+ return val;
+
+ return ret;
+}
+
+static int sysconfig_gpio_refresh(struct sysconfig_priv *priv)
+{
+ struct gpio_desc *program = priv->program;
+ struct gpio_desc *init = priv->init;
+ struct gpio_desc *done = priv->done;
+ int ret;
+
+ /* Enter init mode */
+ gpiod_set_value(program, 1);
+
+ ret = sysconfig_poll_gpio(init, true);
+ if (!ret)
+ ret = sysconfig_poll_gpio(done, false);
+
+ if (ret)
+ return ret;
+
+ /* Enter program mode */
+ gpiod_set_value(program, 0);
+
+ return sysconfig_poll_gpio(init, false);
+}
+
+static int sysconfig_lsc_refresh(struct sysconfig_priv *priv)
+{
+ static const u8 lsc_refresh[] = SYSCONFIG_LSC_REFRESH;
+ int ret;
+
+ ret = sysconfig_cmd_write(priv, lsc_refresh, sizeof(lsc_refresh));
+ if (ret)
+ return ret;
+
+ usleep_range(4000, 8000);
+
+ return 0;
+}
+
+static int sysconfig_refresh(struct sysconfig_priv *priv)
+{
+ struct gpio_desc *program = priv->program;
+ struct gpio_desc *init = priv->init;
+ struct gpio_desc *done = priv->done;
+
+ if (program && init && done)
+ return sysconfig_gpio_refresh(priv);
+
+ return sysconfig_lsc_refresh(priv);
+}
+
+static int sysconfig_isc_enable(struct sysconfig_priv *priv)
+{
+ u8 isc_enable[] = SYSCONFIG_ISC_ENABLE;
+ u32 status;
+ int ret;
+
+ ret = sysconfig_cmd_write(priv, isc_enable, sizeof(isc_enable));
+ if (ret)
+ return ret;
+
+ ret = sysconfig_poll_status(priv, &status);
+ if (ret)
+ return ret;
+
+ if (status & SYSCONFIG_STATUS_FAIL)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sysconfig_isc_erase(struct sysconfig_priv *priv)
+{
+ u8 isc_erase[] = SYSCONFIG_ISC_ERASE;
+ u32 status;
+ int ret;
+
+ ret = sysconfig_cmd_write(priv, isc_erase, sizeof(isc_erase));
+ if (ret)
+ return ret;
+
+ ret = sysconfig_poll_status(priv, &status);
+ if (ret)
+ return ret;
+
+ if (status & SYSCONFIG_STATUS_FAIL)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sysconfig_isc_init(struct sysconfig_priv *priv)
+{
+ int ret = sysconfig_isc_enable(priv);
+
+ if (ret)
+ return ret;
+
+ return sysconfig_isc_erase(priv);
+}
+
+static int sysconfig_lsc_init_addr(struct sysconfig_priv *priv)
+{
+ const u8 lsc_init_addr[] = SYSCONFIG_LSC_INIT_ADDR;
+
+ return sysconfig_cmd_write(priv, lsc_init_addr, sizeof(lsc_init_addr));
+}
+
+static int sysconfig_burst_write_init(struct sysconfig_priv *priv)
+{
+ return priv->bitstream_burst_write_init(priv);
+}
+
+static int sysconfig_burst_write_complete(struct sysconfig_priv *priv)
+{
+ return priv->bitstream_burst_write_complete(priv);
+}
+
+static int sysconfig_bitstream_burst_write(struct sysconfig_priv *priv,
+ const char *buf, size_t count)
+{
+ int ret = priv->bitstream_burst_write(priv, buf, count);
+
+ if (ret)
+ sysconfig_burst_write_complete(priv);
+
+ return ret;
+}
+
+static int sysconfig_isc_disable(struct sysconfig_priv *priv)
+{
+ const u8 isc_disable[] = SYSCONFIG_ISC_DISABLE;
+
+ return sysconfig_cmd_write(priv, isc_disable, sizeof(isc_disable));
+}
+
+static void sysconfig_cleanup(struct sysconfig_priv *priv)
+{
+ sysconfig_isc_erase(priv);
+ sysconfig_refresh(priv);
+}
+
+static int sysconfig_isc_finish(struct sysconfig_priv *priv)
+{
+ struct gpio_desc *done_gpio = priv->done;
+ u32 status;
+ int ret;
+
+ if (done_gpio) {
+ ret = sysconfig_isc_disable(priv);
+ if (ret)
+ return ret;
+
+ return sysconfig_poll_gpio(done_gpio, true);
+ }
+
+ ret = sysconfig_poll_status(priv, &status);
+ if (ret)
+ return ret;
+
+ if ((status & SYSCONFIG_STATUS_DONE) &&
+ !(status & SYSCONFIG_STATUS_BUSY) &&
+ !(status & SYSCONFIG_STATUS_ERR))
+ return sysconfig_isc_disable(priv);
+
+ return -EFAULT;
+}
+
+static enum fpga_mgr_states sysconfig_ops_state(struct fpga_manager *mgr)
+{
+ struct sysconfig_priv *priv = mgr->priv;
+ struct gpio_desc *done = priv->done;
+ u32 status;
+ int ret;
+
+ if (done && (gpiod_get_value(done) > 0))
+ return FPGA_MGR_STATE_OPERATING;
+
+ ret = sysconfig_read_status(priv, &status);
+ if (!ret && (status & SYSCONFIG_STATUS_DONE))
+ return FPGA_MGR_STATE_OPERATING;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static int sysconfig_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct sysconfig_priv *priv = mgr->priv;
+ struct device *dev = &mgr->dev;
+ int ret;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(dev, "Partial reconfiguration is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Enter program mode */
+ ret = sysconfig_refresh(priv);
+ if (ret) {
+ dev_err(dev, "Failed to go to program mode\n");
+ return ret;
+ }
+
+ /* Enter ISC mode */
+ ret = sysconfig_isc_init(priv);
+ if (ret) {
+ dev_err(dev, "Failed to go to ISC mode\n");
+ return ret;
+ }
+
+ /* Initialize the Address Shift Register */
+ ret = sysconfig_lsc_init_addr(priv);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize the Address Shift Register\n");
+ return ret;
+ }
+
+ /* Prepare for bitstream burst write */
+ ret = sysconfig_burst_write_init(priv);
+ if (ret)
+ dev_err(dev, "Failed to prepare for bitstream burst write\n");
+
+ return ret;
+}
+
+static int sysconfig_ops_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ return sysconfig_bitstream_burst_write(mgr->priv, buf, count);
+}
+
+static int sysconfig_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct sysconfig_priv *priv = mgr->priv;
+ struct device *dev = &mgr->dev;
+ int ret;
+
+ ret = sysconfig_burst_write_complete(priv);
+ if (!ret)
+ ret = sysconfig_poll_busy(priv);
+
+ if (ret) {
+		dev_err(dev, "Error while waiting for bitstream write to finish\n");
+ goto fail;
+ }
+
+ ret = sysconfig_isc_finish(priv);
+
+fail:
+ if (ret)
+ sysconfig_cleanup(priv);
+
+ return ret;
+}
+
+static const struct fpga_manager_ops sysconfig_fpga_mgr_ops = {
+ .state = sysconfig_ops_state,
+ .write_init = sysconfig_ops_write_init,
+ .write = sysconfig_ops_write,
+ .write_complete = sysconfig_ops_write_complete,
+};
+
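+/**
+ * sysconfig_probe - register an FPGA manager for a sysCONFIG port driver
+ * @priv: port driver context; @priv->dev and all four transfer callbacks
+ *        must be filled in by the caller (see the slave SPI driver for an
+ *        example)
+ *
+ * Return: 0 on success or a negative error code.
+ */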
+int sysconfig_probe(struct sysconfig_priv *priv)
+{
+ struct gpio_desc *program, *init, *done;
+ struct device *dev = priv->dev;
+ struct fpga_manager *mgr;
+
+ if (!dev)
+ return -ENODEV;
+
+ if (!priv->command_transfer ||
+ !priv->bitstream_burst_write_init ||
+ !priv->bitstream_burst_write ||
+ !priv->bitstream_burst_write_complete) {
+ dev_err(dev, "Essential callback is missing\n");
+ return -EINVAL;
+ }
+
+ program = devm_gpiod_get_optional(dev, "program", GPIOD_OUT_LOW);
+ if (IS_ERR(program))
+ return dev_err_probe(dev, PTR_ERR(program),
+ "Failed to get PROGRAM GPIO\n");
+
+ init = devm_gpiod_get_optional(dev, "init", GPIOD_IN);
+ if (IS_ERR(init))
+ return dev_err_probe(dev, PTR_ERR(init),
+ "Failed to get INIT GPIO\n");
+
+ done = devm_gpiod_get_optional(dev, "done", GPIOD_IN);
+ if (IS_ERR(done))
+ return dev_err_probe(dev, PTR_ERR(done),
+ "Failed to get DONE GPIO\n");
+
+ priv->program = program;
+ priv->init = init;
+ priv->done = done;
+
+ mgr = devm_fpga_mgr_register(dev, "Lattice sysCONFIG FPGA Manager",
+ &sysconfig_fpga_mgr_ops, priv);
+
+ return PTR_ERR_OR_ZERO(mgr);
+}
+EXPORT_SYMBOL(sysconfig_probe);
+
+MODULE_DESCRIPTION("Lattice sysCONFIG FPGA Manager Core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/lattice-sysconfig.h b/drivers/fpga/lattice-sysconfig.h
new file mode 100644
index 0000000000..df47d9a524
--- /dev/null
+++ b/drivers/fpga/lattice-sysconfig.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LATTICE_SYSCONFIG_H
+#define __LATTICE_SYSCONFIG_H
+
+#define SYSCONFIG_ISC_ENABLE {0xC6, 0x00, 0x00, 0x00}
+#define SYSCONFIG_ISC_DISABLE {0x26, 0x00, 0x00, 0x00}
+#define SYSCONFIG_ISC_ERASE {0x0E, 0x01, 0x00, 0x00}
+#define SYSCONFIG_LSC_READ_STATUS {0x3C, 0x00, 0x00, 0x00}
+#define SYSCONFIG_LSC_CHECK_BUSY {0xF0, 0x00, 0x00, 0x00}
+#define SYSCONFIG_LSC_REFRESH {0x79, 0x00, 0x00, 0x00}
+#define SYSCONFIG_LSC_INIT_ADDR {0x46, 0x00, 0x00, 0x00}
+#define SYSCONFIG_LSC_BITSTREAM_BURST {0x7a, 0x00, 0x00, 0x00}
+
+#define SYSCONFIG_STATUS_DONE BIT(8)
+#define SYSCONFIG_STATUS_BUSY BIT(12)
+#define SYSCONFIG_STATUS_FAIL BIT(13)
+#define SYSCONFIG_STATUS_ERR GENMASK(25, 23)
+
+#define SYSCONFIG_POLL_INTERVAL_US 30
+#define SYSCONFIG_POLL_BUSY_TIMEOUT_US 1000000
+#define SYSCONFIG_POLL_GPIO_TIMEOUT_US 100000
+
+struct sysconfig_priv {
+ struct gpio_desc *program;
+ struct gpio_desc *init;
+ struct gpio_desc *done;
+ struct device *dev;
+ int (*command_transfer)(struct sysconfig_priv *priv, const void *tx_buf,
+ size_t tx_len, void *rx_buf, size_t rx_len);
+ int (*bitstream_burst_write_init)(struct sysconfig_priv *priv);
+ int (*bitstream_burst_write)(struct sysconfig_priv *priv,
+ const char *tx_buf, size_t tx_len);
+ int (*bitstream_burst_write_complete)(struct sysconfig_priv *priv);
+};
+
+int sysconfig_probe(struct sysconfig_priv *priv);
+
+#endif /* __LATTICE_SYSCONFIG_H */
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
new file mode 100644
index 0000000000..905607992a
--- /dev/null
+++ b/drivers/fpga/machxo2-spi.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lattice MachXO2 Slave SPI Driver
+ *
+ * Manage Lattice FPGA firmware that is loaded over SPI using
+ * the slave serial configuration interface.
+ *
+ * Copyright (C) 2018 Paolo Pisati <p.pisati@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+/* MachXO2 Programming Guide - sysCONFIG Programming Commands */
+#define IDCODE_PUB {0xe0, 0x00, 0x00, 0x00}
+#define ISC_ENABLE {0xc6, 0x08, 0x00, 0x00}
+#define ISC_ERASE {0x0e, 0x04, 0x00, 0x00}
+#define ISC_PROGRAMDONE {0x5e, 0x00, 0x00, 0x00}
+#define LSC_INITADDRESS {0x46, 0x00, 0x00, 0x00}
+#define LSC_PROGINCRNV {0x70, 0x00, 0x00, 0x01}
+#define LSC_READ_STATUS {0x3c, 0x00, 0x00, 0x00}
+#define LSC_REFRESH {0x79, 0x00, 0x00, 0x00}
+
+/*
+ * Max CCLK in Slave SPI mode according to 'MachXO2 Family Data
+ * Sheet' sysCONFIG Port Timing Specifications (3-36)
+ */
+#define MACHXO2_MAX_SPEED 66000000
+
+#define MACHXO2_LOW_DELAY_USEC 5
+#define MACHXO2_HIGH_DELAY_USEC 200
+#define MACHXO2_REFRESH_USEC 4800
+#define MACHXO2_MAX_BUSY_LOOP 128
+#define MACHXO2_MAX_REFRESH_LOOP 16
+
+#define MACHXO2_PAGE_SIZE 16
+#define MACHXO2_BUF_SIZE (MACHXO2_PAGE_SIZE + 4)
+
+/* Status register bits, errors and error mask */
+#define BUSY 12
+#define DONE 8
+#define DVER 27
+#define ENAB 9
+#define ERRBITS 23
+#define ERRMASK 7
+#define FAIL 13
+
+#define ENOERR 0 /* no error */
+#define EID 1
+#define ECMD 2
+#define ECRC 3
+#define EPREAM 4 /* preamble error */
+#define EABRT 5 /* abort error */
+#define EOVERFL 6 /* overflow error */
+#define ESDMEOF 7 /* SDM EOF */
+
+static inline u8 get_err(unsigned long *status)
+{
+ return (*status >> ERRBITS) & ERRMASK;
+}
+
+static int get_status(struct spi_device *spi, unsigned long *status)
+{
+ struct spi_message msg;
+ struct spi_transfer rx, tx;
+ static const u8 cmd[] = LSC_READ_STATUS;
+ int ret;
+
+ memset(&rx, 0, sizeof(rx));
+ memset(&tx, 0, sizeof(tx));
+ tx.tx_buf = cmd;
+ tx.len = sizeof(cmd);
+ rx.rx_buf = status;
+ rx.len = 4;
+ spi_message_init(&msg);
+ spi_message_add_tail(&tx, &msg);
+ spi_message_add_tail(&rx, &msg);
+ ret = spi_sync(spi, &msg);
+ if (ret)
+ return ret;
+
+ *status = be32_to_cpu(*status);
+
+ return 0;
+}
+
+#ifdef DEBUG
+static const char *get_err_string(u8 err)
+{
+ switch (err) {
+ case ENOERR: return "No Error";
+ case EID: return "ID ERR";
+ case ECMD: return "CMD ERR";
+ case ECRC: return "CRC ERR";
+ case EPREAM: return "Preamble ERR";
+ case EABRT: return "Abort ERR";
+ case EOVERFL: return "Overflow ERR";
+ case ESDMEOF: return "SDM EOF";
+ }
+
+	return "Unknown error";
+}
+#endif
+
+static void dump_status_reg(unsigned long *status)
+{
+#ifdef DEBUG
+ pr_debug("machxo2 status: 0x%08lX - done=%d, cfgena=%d, busy=%d, fail=%d, devver=%d, err=%s\n",
+ *status, test_bit(DONE, status), test_bit(ENAB, status),
+ test_bit(BUSY, status), test_bit(FAIL, status),
+ test_bit(DVER, status), get_err_string(get_err(status)));
+#endif
+}
+
+static int wait_until_not_busy(struct spi_device *spi)
+{
+ unsigned long status;
+ int ret, loop = 0;
+
+ do {
+ ret = get_status(spi, &status);
+ if (ret)
+ return ret;
+ if (++loop >= MACHXO2_MAX_BUSY_LOOP)
+ return -EBUSY;
+ } while (test_bit(BUSY, &status));
+
+ return 0;
+}
+
+static int machxo2_cleanup(struct fpga_manager *mgr)
+{
+ struct spi_device *spi = mgr->priv;
+ struct spi_message msg;
+ struct spi_transfer tx[2];
+ static const u8 erase[] = ISC_ERASE;
+ static const u8 refresh[] = LSC_REFRESH;
+ int ret;
+
+ memset(tx, 0, sizeof(tx));
+ spi_message_init(&msg);
+ tx[0].tx_buf = &erase;
+ tx[0].len = sizeof(erase);
+ spi_message_add_tail(&tx[0], &msg);
+ ret = spi_sync(spi, &msg);
+ if (ret)
+ goto fail;
+
+ ret = wait_until_not_busy(spi);
+ if (ret)
+ goto fail;
+
+ spi_message_init(&msg);
+ tx[1].tx_buf = &refresh;
+ tx[1].len = sizeof(refresh);
+ tx[1].delay.value = MACHXO2_REFRESH_USEC;
+ tx[1].delay.unit = SPI_DELAY_UNIT_USECS;
+ spi_message_add_tail(&tx[1], &msg);
+ ret = spi_sync(spi, &msg);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ dev_err(&mgr->dev, "Cleanup failed\n");
+
+ return ret;
+}
+
+static enum fpga_mgr_states machxo2_spi_state(struct fpga_manager *mgr)
+{
+ struct spi_device *spi = mgr->priv;
+ unsigned long status;
+
+ get_status(spi, &status);
+ if (!test_bit(BUSY, &status) && test_bit(DONE, &status) &&
+ get_err(&status) == ENOERR)
+ return FPGA_MGR_STATE_OPERATING;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static int machxo2_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct spi_device *spi = mgr->priv;
+ struct spi_message msg;
+ struct spi_transfer tx[3];
+ static const u8 enable[] = ISC_ENABLE;
+ static const u8 erase[] = ISC_ERASE;
+ static const u8 initaddr[] = LSC_INITADDRESS;
+ unsigned long status;
+ int ret;
+
+ if ((info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ dev_err(&mgr->dev,
+ "Partial reconfiguration is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ get_status(spi, &status);
+ dump_status_reg(&status);
+ memset(tx, 0, sizeof(tx));
+ spi_message_init(&msg);
+ tx[0].tx_buf = &enable;
+ tx[0].len = sizeof(enable);
+ tx[0].delay.value = MACHXO2_LOW_DELAY_USEC;
+ tx[0].delay.unit = SPI_DELAY_UNIT_USECS;
+ spi_message_add_tail(&tx[0], &msg);
+
+ tx[1].tx_buf = &erase;
+ tx[1].len = sizeof(erase);
+ spi_message_add_tail(&tx[1], &msg);
+ ret = spi_sync(spi, &msg);
+ if (ret)
+ goto fail;
+
+ ret = wait_until_not_busy(spi);
+ if (ret)
+ goto fail;
+
+ get_status(spi, &status);
+ if (test_bit(FAIL, &status)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ dump_status_reg(&status);
+
+ spi_message_init(&msg);
+ tx[2].tx_buf = &initaddr;
+ tx[2].len = sizeof(initaddr);
+ spi_message_add_tail(&tx[2], &msg);
+ ret = spi_sync(spi, &msg);
+ if (ret)
+ goto fail;
+
+ get_status(spi, &status);
+ dump_status_reg(&status);
+
+ return 0;
+fail:
+ dev_err(&mgr->dev, "Error during FPGA init.\n");
+
+ return ret;
+}
+
+static int machxo2_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct spi_device *spi = mgr->priv;
+ struct spi_message msg;
+ struct spi_transfer tx;
+ static const u8 progincr[] = LSC_PROGINCRNV;
+ u8 payload[MACHXO2_BUF_SIZE];
+ unsigned long status;
+ int i, ret;
+
+ if (count % MACHXO2_PAGE_SIZE != 0) {
+ dev_err(&mgr->dev, "Malformed payload.\n");
+ return -EINVAL;
+ }
+ get_status(spi, &status);
+ dump_status_reg(&status);
+ memcpy(payload, &progincr, sizeof(progincr));
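+	/* Each transfer below is the 4-byte LSC_PROGINCRNV command followed by one 16-byte page */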
+ for (i = 0; i < count; i += MACHXO2_PAGE_SIZE) {
+ memcpy(&payload[sizeof(progincr)], &buf[i], MACHXO2_PAGE_SIZE);
+ memset(&tx, 0, sizeof(tx));
+ spi_message_init(&msg);
+ tx.tx_buf = payload;
+ tx.len = MACHXO2_BUF_SIZE;
+ tx.delay.value = MACHXO2_HIGH_DELAY_USEC;
+ tx.delay.unit = SPI_DELAY_UNIT_USECS;
+ spi_message_add_tail(&tx, &msg);
+ ret = spi_sync(spi, &msg);
+ if (ret) {
+ dev_err(&mgr->dev, "Error loading the bitstream.\n");
+ return ret;
+ }
+ }
+ get_status(spi, &status);
+ dump_status_reg(&status);
+
+ return 0;
+}
+
+static int machxo2_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct spi_device *spi = mgr->priv;
+ struct spi_message msg;
+ struct spi_transfer tx[2];
+ static const u8 progdone[] = ISC_PROGRAMDONE;
+ static const u8 refresh[] = LSC_REFRESH;
+ unsigned long status;
+ int ret, refreshloop = 0;
+
+ memset(tx, 0, sizeof(tx));
+ spi_message_init(&msg);
+ tx[0].tx_buf = &progdone;
+ tx[0].len = sizeof(progdone);
+ spi_message_add_tail(&tx[0], &msg);
+ ret = spi_sync(spi, &msg);
+ if (ret)
+ goto fail;
+ ret = wait_until_not_busy(spi);
+ if (ret)
+ goto fail;
+
+ get_status(spi, &status);
+ dump_status_reg(&status);
+ if (!test_bit(DONE, &status)) {
+ machxo2_cleanup(mgr);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ do {
+ spi_message_init(&msg);
+ tx[1].tx_buf = &refresh;
+ tx[1].len = sizeof(refresh);
+ tx[1].delay.value = MACHXO2_REFRESH_USEC;
+ tx[1].delay.unit = SPI_DELAY_UNIT_USECS;
+ spi_message_add_tail(&tx[1], &msg);
+ ret = spi_sync(spi, &msg);
+ if (ret)
+ goto fail;
+
+ /* check refresh status */
+ get_status(spi, &status);
+ dump_status_reg(&status);
+ if (!test_bit(BUSY, &status) && test_bit(DONE, &status) &&
+ get_err(&status) == ENOERR)
+ break;
+ if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
+ machxo2_cleanup(mgr);
+ ret = -EINVAL;
+ goto fail;
+ }
+ } while (1);
+
+ get_status(spi, &status);
+ dump_status_reg(&status);
+
+ return 0;
+fail:
+ dev_err(&mgr->dev, "Refresh failed.\n");
+
+ return ret;
+}
+
+static const struct fpga_manager_ops machxo2_ops = {
+ .state = machxo2_spi_state,
+ .write_init = machxo2_write_init,
+ .write = machxo2_write,
+ .write_complete = machxo2_write_complete,
+};
+
+static int machxo2_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct fpga_manager *mgr;
+
+ if (spi->max_speed_hz > MACHXO2_MAX_SPEED) {
+ dev_err(dev, "Speed is too high\n");
+ return -EINVAL;
+ }
+
+ mgr = devm_fpga_mgr_register(dev, "Lattice MachXO2 SPI FPGA Manager",
+ &machxo2_ops, spi);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_match[] = {
+ { .compatible = "lattice,machxo2-slave-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+#endif
+
+static const struct spi_device_id lattice_ids[] = {
+ { "machxo2-slave-spi", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, lattice_ids);
+
+static struct spi_driver machxo2_spi_driver = {
+ .driver = {
+ .name = "machxo2-slave-spi",
+ .of_match_table = of_match_ptr(of_match),
+ },
+ .probe = machxo2_spi_probe,
+ .id_table = lattice_ids,
+};
+
+module_spi_driver(machxo2_spi_driver);
+
+MODULE_AUTHOR("Paolo Pisati <p.pisati@gmail.com>");
+MODULE_DESCRIPTION("Load Lattice FPGA firmware over SPI");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c
new file mode 100644
index 0000000000..2a82c726d6
--- /dev/null
+++ b/drivers/fpga/microchip-spi.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PolarFire FPGA programming over the slave SPI interface.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#define MPF_SPI_ISC_ENABLE 0x0B
+#define MPF_SPI_ISC_DISABLE 0x0C
+#define MPF_SPI_READ_STATUS 0x00
+#define MPF_SPI_READ_DATA 0x01
+#define MPF_SPI_FRAME_INIT 0xAE
+#define MPF_SPI_FRAME 0xEE
+#define MPF_SPI_PRG_MODE 0x01
+#define MPF_SPI_RELEASE 0x23
+
+#define MPF_SPI_FRAME_SIZE 16
+
+#define MPF_HEADER_SIZE_OFFSET 24
+#define MPF_DATA_SIZE_OFFSET 55
+
+#define MPF_LOOKUP_TABLE_RECORD_SIZE 9
+#define MPF_LOOKUP_TABLE_BLOCK_ID_OFFSET 0
+#define MPF_LOOKUP_TABLE_BLOCK_START_OFFSET 1
+
+#define MPF_COMPONENTS_SIZE_ID 5
+#define MPF_BITSTREAM_ID 8
+
+#define MPF_BITS_PER_COMPONENT_SIZE 22
+
+#define MPF_STATUS_POLL_TIMEOUT (2 * USEC_PER_SEC)
+#define MPF_STATUS_BUSY BIT(0)
+#define MPF_STATUS_READY BIT(1)
+#define MPF_STATUS_SPI_VIOLATION BIT(2)
+#define MPF_STATUS_SPI_ERROR BIT(3)
+
+struct mpf_priv {
+ struct spi_device *spi;
+ bool program_mode;
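+	/* tx/rx double as SPI transfer buffers; the alignment below keeps them DMA-safe */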
+ u8 tx __aligned(ARCH_KMALLOC_MINALIGN);
+ u8 rx;
+};
+
+static int mpf_read_status(struct mpf_priv *priv)
+{
+ /*
+ * HW status is returned on MISO in the first byte after CS went
+	 * active. However, the first read can be inadequate, so we submit
+	 * two identical SPI transfers and use the result of the latter.
+ */
+ struct spi_transfer xfers[2] = {
+ {
+ .tx_buf = &priv->tx,
+ .rx_buf = &priv->rx,
+ .len = 1,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &priv->tx,
+ .rx_buf = &priv->rx,
+ .len = 1,
+ },
+ };
+ u8 status;
+ int ret;
+
+ priv->tx = MPF_SPI_READ_STATUS;
+
+ ret = spi_sync_transfer(priv->spi, xfers, 2);
+ if (ret)
+ return ret;
+
+ status = priv->rx;
+
+ if ((status & MPF_STATUS_SPI_VIOLATION) ||
+ (status & MPF_STATUS_SPI_ERROR))
+ return -EIO;
+
+ return status;
+}
+
+static enum fpga_mgr_states mpf_ops_state(struct fpga_manager *mgr)
+{
+ struct mpf_priv *priv = mgr->priv;
+ bool program_mode;
+ int status;
+
+ program_mode = priv->program_mode;
+ status = mpf_read_status(priv);
+
+ if (!program_mode && !status)
+ return FPGA_MGR_STATE_OPERATING;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static int mpf_ops_parse_header(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ size_t component_size_byte_num, component_size_byte_off,
+ components_size_start, bitstream_start,
+ block_id_offset, block_start_offset;
+ u8 header_size, blocks_num, block_id;
+ u32 block_start, component_size;
+ u16 components_num, i;
+
+ if (!buf) {
+ dev_err(&mgr->dev, "Image buffer is not provided\n");
+ return -EINVAL;
+ }
+
+ header_size = *(buf + MPF_HEADER_SIZE_OFFSET);
+ if (header_size > count) {
+ info->header_size = header_size;
+ return -EAGAIN;
+ }
+
+ /*
+	 * Walk the look-up table to find out where the actual bitstream
+	 * starts and where the sizes of the bitstream components lie.
+ */
+ blocks_num = *(buf + header_size - 1);
+ block_id_offset = header_size + MPF_LOOKUP_TABLE_BLOCK_ID_OFFSET;
+ block_start_offset = header_size + MPF_LOOKUP_TABLE_BLOCK_START_OFFSET;
+
+ header_size += blocks_num * MPF_LOOKUP_TABLE_RECORD_SIZE;
+ if (header_size > count) {
+ info->header_size = header_size;
+ return -EAGAIN;
+ }
+
+ components_size_start = 0;
+ bitstream_start = 0;
+
+ while (blocks_num--) {
+ block_id = *(buf + block_id_offset);
+ block_start = get_unaligned_le32(buf + block_start_offset);
+
+ switch (block_id) {
+ case MPF_BITSTREAM_ID:
+ bitstream_start = block_start;
+ info->header_size = block_start;
+ if (block_start > count)
+ return -EAGAIN;
+
+ break;
+ case MPF_COMPONENTS_SIZE_ID:
+ components_size_start = block_start;
+ break;
+ default:
+ break;
+ }
+
+ if (bitstream_start && components_size_start)
+ break;
+
+ block_id_offset += MPF_LOOKUP_TABLE_RECORD_SIZE;
+ block_start_offset += MPF_LOOKUP_TABLE_RECORD_SIZE;
+ }
+
+ if (!bitstream_start || !components_size_start) {
+ dev_err(&mgr->dev, "Failed to parse header look-up table\n");
+ return -EFAULT;
+ }
+
+ /*
+ * Parse bitstream size.
+	 * The sizes of the bitstream components are 22 bits long and packed
+	 * next to each other. By now the image header has been extended up to
+	 * where the actual bitstream starts, so no overflow check is needed.
+ */
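+	/*
+	 * Illustrative example: for component i = 1 the size field starts at
+	 * bit 22, i.e. byte 2, bit offset 6, so the loop below reads the
+	 * little-endian 32-bit word at that byte, shifts right by 6 and masks
+	 * the low 22 bits.
+	 */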
+ components_num = get_unaligned_le16(buf + MPF_DATA_SIZE_OFFSET);
+
+ for (i = 0; i < components_num; i++) {
+ component_size_byte_num =
+ (i * MPF_BITS_PER_COMPONENT_SIZE) / BITS_PER_BYTE;
+ component_size_byte_off =
+ (i * MPF_BITS_PER_COMPONENT_SIZE) % BITS_PER_BYTE;
+
+ component_size = get_unaligned_le32(buf +
+ components_size_start +
+ component_size_byte_num);
+ component_size >>= component_size_byte_off;
+ component_size &= GENMASK(MPF_BITS_PER_COMPONENT_SIZE - 1, 0);
+
+ info->data_size += component_size * MPF_SPI_FRAME_SIZE;
+ }
+
+ return 0;
+}
+
+static int mpf_poll_status(struct mpf_priv *priv, u8 mask)
+{
+ int ret, status;
+
+ /*
+ * Busy poll HW status. Polling stops if any of the following
+ * conditions are met:
+ * - timeout is reached
+ * - mpf_read_status() returns an error
+ * - busy bit is cleared AND mask bits are set
+ */
+ ret = read_poll_timeout(mpf_read_status, status,
+ (status < 0) ||
+ ((status & (MPF_STATUS_BUSY | mask)) == mask),
+ 0, MPF_STATUS_POLL_TIMEOUT, false, priv);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
+
+static int mpf_spi_write(struct mpf_priv *priv, const void *buf, size_t buf_size)
+{
+ int status = mpf_poll_status(priv, 0);
+
+ if (status < 0)
+ return status;
+
+ return spi_write_then_read(priv->spi, buf, buf_size, NULL, 0);
+}
+
+static int mpf_spi_write_then_read(struct mpf_priv *priv,
+ const void *txbuf, size_t txbuf_size,
+ void *rxbuf, size_t rxbuf_size)
+{
+ const u8 read_command[] = { MPF_SPI_READ_DATA };
+ int ret;
+
+ ret = mpf_spi_write(priv, txbuf, txbuf_size);
+ if (ret)
+ return ret;
+
+ ret = mpf_poll_status(priv, MPF_STATUS_READY);
+ if (ret < 0)
+ return ret;
+
+ return spi_write_then_read(priv->spi, read_command, sizeof(read_command),
+ rxbuf, rxbuf_size);
+}
+
+static int mpf_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info, const char *buf,
+ size_t count)
+{
+ const u8 program_mode[] = { MPF_SPI_FRAME_INIT, MPF_SPI_PRG_MODE };
+ const u8 isc_en_command[] = { MPF_SPI_ISC_ENABLE };
+ struct mpf_priv *priv = mgr->priv;
+ struct device *dev = &mgr->dev;
+ u32 isc_ret = 0;
+ int ret;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(dev, "Partial reconfiguration is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = mpf_spi_write_then_read(priv, isc_en_command, sizeof(isc_en_command),
+ &isc_ret, sizeof(isc_ret));
+ if (ret || isc_ret) {
+ dev_err(dev, "Failed to enable ISC: spi_ret %d, isc_ret %u\n",
+ ret, isc_ret);
+ return -EFAULT;
+ }
+
+ ret = mpf_spi_write(priv, program_mode, sizeof(program_mode));
+ if (ret) {
+ dev_err(dev, "Failed to enter program mode: %d\n", ret);
+ return ret;
+ }
+
+ priv->program_mode = true;
+
+ return 0;
+}
+
+static int mpf_spi_frame_write(struct mpf_priv *priv, const char *buf)
+{
+ struct spi_transfer xfers[2] = {
+ {
+ .tx_buf = &priv->tx,
+ .len = 1,
+ }, {
+ .tx_buf = buf,
+ .len = MPF_SPI_FRAME_SIZE,
+ },
+ };
+ int ret;
+
+ ret = mpf_poll_status(priv, 0);
+ if (ret < 0)
+ return ret;
+
+ priv->tx = MPF_SPI_FRAME;
+
+ return spi_sync_transfer(priv->spi, xfers, ARRAY_SIZE(xfers));
+}
+
+static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count)
+{
+ struct mpf_priv *priv = mgr->priv;
+ struct device *dev = &mgr->dev;
+ int ret, i;
+
+ if (count % MPF_SPI_FRAME_SIZE) {
+ dev_err(dev, "Bitstream size is not a multiple of %d\n",
+ MPF_SPI_FRAME_SIZE);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count / MPF_SPI_FRAME_SIZE; i++) {
+ ret = mpf_spi_frame_write(priv, buf + i * MPF_SPI_FRAME_SIZE);
+ if (ret) {
+ dev_err(dev, "Failed to write bitstream frame %d/%zu\n",
+ i, count / MPF_SPI_FRAME_SIZE);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mpf_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ const u8 isc_dis_command[] = { MPF_SPI_ISC_DISABLE };
+ const u8 release_command[] = { MPF_SPI_RELEASE };
+ struct mpf_priv *priv = mgr->priv;
+ struct device *dev = &mgr->dev;
+ int ret;
+
+ ret = mpf_spi_write(priv, isc_dis_command, sizeof(isc_dis_command));
+ if (ret) {
+ dev_err(dev, "Failed to disable ISC: %d\n", ret);
+ return ret;
+ }
+
+ usleep_range(1000, 2000);
+
+ ret = mpf_spi_write(priv, release_command, sizeof(release_command));
+ if (ret) {
+ dev_err(dev, "Failed to exit program mode: %d\n", ret);
+ return ret;
+ }
+
+ priv->program_mode = false;
+
+ return 0;
+}
+
+static const struct fpga_manager_ops mpf_ops = {
+ .state = mpf_ops_state,
+ .initial_header_size = 71,
+ .skip_header = true,
+ .parse_header = mpf_ops_parse_header,
+ .write_init = mpf_ops_write_init,
+ .write = mpf_ops_write,
+ .write_complete = mpf_ops_write_complete,
+};
+
+static int mpf_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct fpga_manager *mgr;
+ struct mpf_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spi = spi;
+
+ mgr = devm_fpga_mgr_register(dev, "Microchip Polarfire SPI FPGA Manager",
+ &mpf_ops, priv);
+
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+static const struct spi_device_id mpf_spi_ids[] = {
+ { .name = "mpf-spi-fpga-mgr", },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, mpf_spi_ids);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id mpf_of_ids[] = {
+ { .compatible = "microchip,mpf-spi-fpga-mgr" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpf_of_ids);
+#endif /* IS_ENABLED(CONFIG_OF) */
+
+static struct spi_driver mpf_driver = {
+ .probe = mpf_probe,
+ .id_table = mpf_spi_ids,
+ .driver = {
+ .name = "microchip_mpf_spi_fpga_mgr",
+ .of_match_table = of_match_ptr(mpf_of_ids),
+ },
+};
+
+module_spi_driver(mpf_driver);
+
+MODULE_DESCRIPTION("Microchip Polarfire SPI FPGA Manager");
+MODULE_AUTHOR("Ivan Bornyakov <i.bornyakov@metrotek.ru>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
new file mode 100644
index 0000000000..a6affd83f2
--- /dev/null
+++ b/drivers/fpga/of-fpga-region.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Region - Device Tree support for FPGA programming under Linux
+ *
+ * Copyright (C) 2013-2016 Altera Corporation
+ * Copyright (C) 2017 Intel Corporation
+ */
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
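+/*
+ * Illustrative device tree fragment (node names hypothetical) for a region
+ * handled by this driver:
+ *
+ *	fpga_region0: fpga-region {
+ *		compatible = "fpga-region";
+ *		fpga-mgr = <&fpga_mgr0>;
+ *		fpga-bridges = <&fpga_bridge0>;
+ *	};
+ */
+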
+static const struct of_device_id fpga_region_of_match[] = {
+ { .compatible = "fpga-region", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fpga_region_of_match);
+
+/**
+ * of_fpga_region_find - find FPGA region
+ * @np: device node of FPGA Region
+ *
+ * Caller will need to put_device(&region->dev) when done.
+ *
+ * Return: FPGA Region struct or NULL
+ */
+static struct fpga_region *of_fpga_region_find(struct device_node *np)
+{
+ return fpga_region_class_find(NULL, np, device_match_of_node);
+}
+
+/**
+ * of_fpga_region_get_mgr - get reference for FPGA manager
+ * @np: device node of FPGA region
+ *
+ * Get FPGA Manager from "fpga-mgr" property or from ancestor region.
+ *
+ * Caller should call fpga_mgr_put() when done with manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+static struct fpga_manager *of_fpga_region_get_mgr(struct device_node *np)
+{
+ struct device_node *mgr_node;
+ struct fpga_manager *mgr;
+
+ of_node_get(np);
+ while (np) {
+ if (of_device_is_compatible(np, "fpga-region")) {
+ mgr_node = of_parse_phandle(np, "fpga-mgr", 0);
+ if (mgr_node) {
+ mgr = of_fpga_mgr_get(mgr_node);
+ of_node_put(mgr_node);
+ of_node_put(np);
+ return mgr;
+ }
+ }
+ np = of_get_next_parent(np);
+ }
+ of_node_put(np);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * of_fpga_region_get_bridges - create a list of bridges
+ * @region: FPGA region
+ *
+ * Create a list of bridges including the parent bridge and the bridges
+ * specified by "fpga-bridges" property. Note that the
+ * fpga_bridges_enable/disable/put functions are all fine with an empty list
+ * if that happens.
+ *
+ * Caller should call fpga_bridges_put(&region->bridge_list) when
+ * done with the bridges.
+ *
+ * Return: 0 for success (even if there are no bridges specified)
+ * or -EBUSY if any of the bridges are in use.
+ */
+static int of_fpga_region_get_bridges(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+ struct device_node *region_np = dev->of_node;
+ struct fpga_image_info *info = region->info;
+ struct device_node *br, *np, *parent_br = NULL;
+ int i, ret;
+
+ /* If parent is a bridge, add to list */
+ ret = of_fpga_bridge_get_to_list(region_np->parent, info,
+ &region->bridge_list);
+
+	/* -EBUSY means parent is a bridge that is in use. Give up. */
+ if (ret == -EBUSY)
+ return ret;
+
+ /* Zero return code means parent was a bridge and was added to list. */
+ if (!ret)
+ parent_br = region_np->parent;
+
+ /* If overlay has a list of bridges, use it. */
+ br = of_parse_phandle(info->overlay, "fpga-bridges", 0);
+ if (br) {
+ of_node_put(br);
+ np = info->overlay;
+ } else {
+ np = region_np;
+ }
+
+ for (i = 0; ; i++) {
+ br = of_parse_phandle(np, "fpga-bridges", i);
+ if (!br)
+ break;
+
+ /* If parent bridge is in list, skip it. */
+ if (br == parent_br) {
+ of_node_put(br);
+ continue;
+ }
+
+ /* If node is a bridge, get it and add to list */
+ ret = of_fpga_bridge_get_to_list(br, info,
+ &region->bridge_list);
+ of_node_put(br);
+
+ /* If any of the bridges are in use, give up */
+ if (ret == -EBUSY) {
+ fpga_bridges_put(&region->bridge_list);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * child_regions_with_firmware - check child regions for firmware-name
+ * @overlay: device node of the overlay
+ *
+ * If the overlay adds child FPGA regions, they are not allowed to have
+ * a firmware-name property.
+ *
+ * Return: 0 for OK or -EINVAL if a child FPGA region adds firmware-name.
+ */
+static int child_regions_with_firmware(struct device_node *overlay)
+{
+ struct device_node *child_region;
+ const char *child_firmware_name;
+ int ret = 0;
+
+ of_node_get(overlay);
+
+ child_region = of_find_matching_node(overlay, fpga_region_of_match);
+ while (child_region) {
+ if (!of_property_read_string(child_region, "firmware-name",
+ &child_firmware_name)) {
+ ret = -EINVAL;
+ break;
+ }
+ child_region = of_find_matching_node(child_region,
+ fpga_region_of_match);
+ }
+
+ of_node_put(child_region);
+
+ if (ret)
+ pr_err("firmware-name not allowed in child FPGA region: %pOF",
+ child_region);
+
+ return ret;
+}
+
+/**
+ * of_fpga_region_parse_ov - parse and check overlay applied to region
+ *
+ * @region: FPGA region
+ * @overlay: overlay applied to the FPGA region
+ *
+ * Given an overlay applied to an FPGA region, parse the FPGA image specific
+ * info in the overlay and do some checking.
+ *
+ * Return:
+ * NULL if overlay doesn't direct us to program the FPGA.
+ * fpga_image_info struct if there is an image to program.
+ * error code for invalid overlay.
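+ *
+ * An illustrative overlay fragment (firmware file name hypothetical) that
+ * would trigger programming of the region:
+ *
+ *	&fpga_region0 {
+ *		firmware-name = "soc_image.rbf";
+ *		partial-fpga-config;
+ *		config-complete-timeout-us = <1000>;
+ *	};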
+ */
+static struct fpga_image_info *
+of_fpga_region_parse_ov(struct fpga_region *region,
+ struct device_node *overlay)
+{
+ struct device *dev = &region->dev;
+ struct fpga_image_info *info;
+ const char *firmware_name;
+ int ret;
+
+ if (region->info) {
+ dev_err(dev, "Region already has overlay applied.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Reject overlay if child FPGA Regions added in the overlay have
+ * firmware-name property (would mean that an FPGA region that has
+ * not been added to the live tree yet is doing FPGA programming).
+ */
+ ret = child_regions_with_firmware(overlay);
+ if (ret)
+ return ERR_PTR(ret);
+
+ info = fpga_image_info_alloc(dev);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ info->overlay = overlay;
+
+ /* Read FPGA region properties from the overlay */
+ if (of_property_read_bool(overlay, "partial-fpga-config"))
+ info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+ if (of_property_read_bool(overlay, "external-fpga-config"))
+ info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
+
+ if (of_property_read_bool(overlay, "encrypted-fpga-config"))
+ info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM;
+
+ if (!of_property_read_string(overlay, "firmware-name",
+ &firmware_name)) {
+ info->firmware_name = devm_kstrdup(dev, firmware_name,
+ GFP_KERNEL);
+ if (!info->firmware_name)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ of_property_read_u32(overlay, "region-unfreeze-timeout-us",
+ &info->enable_timeout_us);
+
+ of_property_read_u32(overlay, "region-freeze-timeout-us",
+ &info->disable_timeout_us);
+
+ of_property_read_u32(overlay, "config-complete-timeout-us",
+ &info->config_complete_timeout_us);
+
+ /* If overlay is not programming the FPGA, don't need FPGA image info */
+ if (!info->firmware_name) {
+ ret = 0;
+ goto ret_no_info;
+ }
+
+ /*
+	 * If the overlay informs us that the FPGA was externally programmed,
+	 * specifying firmware here would be ambiguous.
+ */
+ if (info->flags & FPGA_MGR_EXTERNAL_CONFIG) {
+ dev_err(dev, "error: specified firmware and external-fpga-config");
+ ret = -EINVAL;
+ goto ret_no_info;
+ }
+
+ return info;
+ret_no_info:
+ fpga_image_info_free(info);
+ return ERR_PTR(ret);
+}
+
+/**
+ * of_fpga_region_notify_pre_apply - pre-apply overlay notification
+ *
+ * @region: FPGA region that the overlay was applied to
+ * @nd: overlay notification data
+ *
+ * Called when an overlay targeted to an FPGA Region is about to be applied.
+ * Parses the overlay for properties that influence how the FPGA will be
+ * programmed and does some checking. If the checks pass, programs the FPGA.
+ * If the checks fail, overlay is rejected and does not get added to the
+ * live tree.
+ *
+ * Return: 0 for success or negative error code for failure.
+ */
+static int of_fpga_region_notify_pre_apply(struct fpga_region *region,
+ struct of_overlay_notify_data *nd)
+{
+ struct device *dev = &region->dev;
+ struct fpga_image_info *info;
+ int ret;
+
+ info = of_fpga_region_parse_ov(region, nd->overlay);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ /* If overlay doesn't program the FPGA, accept it anyway. */
+ if (!info)
+ return 0;
+
+ if (region->info) {
+ dev_err(dev, "Region already has overlay applied.\n");
+ return -EINVAL;
+ }
+
+ region->info = info;
+ ret = fpga_region_program_fpga(region);
+ if (ret) {
+ /* error; reject overlay */
+ fpga_image_info_free(info);
+ region->info = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * of_fpga_region_notify_post_remove - post-remove overlay notification
+ *
+ * @region: FPGA region that was targeted by the overlay that was removed
+ * @nd: overlay notification data
+ *
+ * Called after an overlay has been removed if the overlay's target was a
+ * FPGA region.
+ */
+static void of_fpga_region_notify_post_remove(struct fpga_region *region,
+ struct of_overlay_notify_data *nd)
+{
+ fpga_bridges_disable(&region->bridge_list);
+ fpga_bridges_put(&region->bridge_list);
+ fpga_image_info_free(region->info);
+ region->info = NULL;
+}
+
+/**
+ * of_fpga_region_notify - reconfig notifier for dynamic DT changes
+ * @nb: notifier block
+ * @action: notifier action
+ * @arg: reconfig data
+ *
+ * This notifier handles programming an FPGA when a "firmware-name" property is
+ * added to an fpga-region.
+ *
+ * Return: NOTIFY_OK or error if FPGA programming fails.
+ */
+static int of_fpga_region_notify(struct notifier_block *nb,
+ unsigned long action, void *arg)
+{
+ struct of_overlay_notify_data *nd = arg;
+ struct fpga_region *region;
+ int ret;
+
+ switch (action) {
+ case OF_OVERLAY_PRE_APPLY:
+ pr_debug("%s OF_OVERLAY_PRE_APPLY\n", __func__);
+ break;
+ case OF_OVERLAY_POST_APPLY:
+ pr_debug("%s OF_OVERLAY_POST_APPLY\n", __func__);
+ return NOTIFY_OK; /* not for us */
+ case OF_OVERLAY_PRE_REMOVE:
+ pr_debug("%s OF_OVERLAY_PRE_REMOVE\n", __func__);
+ return NOTIFY_OK; /* not for us */
+ case OF_OVERLAY_POST_REMOVE:
+ pr_debug("%s OF_OVERLAY_POST_REMOVE\n", __func__);
+ break;
+ default: /* should not happen */
+ return NOTIFY_OK;
+ }
+
+ region = of_fpga_region_find(nd->target);
+ if (!region)
+ return NOTIFY_OK;
+
+ ret = 0;
+ switch (action) {
+ case OF_OVERLAY_PRE_APPLY:
+ ret = of_fpga_region_notify_pre_apply(region, nd);
+ break;
+
+ case OF_OVERLAY_POST_REMOVE:
+ of_fpga_region_notify_post_remove(region, nd);
+ break;
+ }
+
+ put_device(&region->dev);
+
+ if (ret)
+ return notifier_from_errno(ret);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpga_region_of_nb = {
+ .notifier_call = of_fpga_region_notify,
+};
+
+static int of_fpga_region_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct fpga_region *region;
+ struct fpga_manager *mgr;
+ int ret;
+
+ /* Find the FPGA mgr specified by region or parent region. */
+ mgr = of_fpga_region_get_mgr(np);
+ if (IS_ERR(mgr))
+ return -EPROBE_DEFER;
+
+ region = fpga_region_register(dev, mgr, of_fpga_region_get_bridges);
+ if (IS_ERR(region)) {
+ ret = PTR_ERR(region);
+ goto eprobe_mgr_put;
+ }
+
+ of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
+ platform_set_drvdata(pdev, region);
+
+ dev_info(dev, "FPGA Region probed\n");
+
+ return 0;
+
+eprobe_mgr_put:
+ fpga_mgr_put(mgr);
+ return ret;
+}
+
+static int of_fpga_region_remove(struct platform_device *pdev)
+{
+ struct fpga_region *region = platform_get_drvdata(pdev);
+ struct fpga_manager *mgr = region->mgr;
+
+ fpga_region_unregister(region);
+ fpga_mgr_put(mgr);
+
+ return 0;
+}
+
+static struct platform_driver of_fpga_region_driver = {
+ .probe = of_fpga_region_probe,
+ .remove = of_fpga_region_remove,
+ .driver = {
+ .name = "of-fpga-region",
+ .of_match_table = of_match_ptr(fpga_region_of_match),
+ },
+};
+
+/**
+ * of_fpga_region_init - init function for fpga_region class
+ * Creates the fpga_region class and registers a reconfig notifier.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int __init of_fpga_region_init(void)
+{
+ int ret;
+
+ ret = of_overlay_notifier_register(&fpga_region_of_nb);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&of_fpga_region_driver);
+ if (ret)
+ goto err_plat;
+
+ return 0;
+
+err_plat:
+ of_overlay_notifier_unregister(&fpga_region_of_nb);
+ return ret;
+}
+
+static void __exit of_fpga_region_exit(void)
+{
+ platform_driver_unregister(&of_fpga_region_driver);
+ of_overlay_notifier_unregister(&fpga_region_of_nb);
+}
+
+subsys_initcall(of_fpga_region_init);
+module_exit(of_fpga_region_exit);
+
+MODULE_DESCRIPTION("FPGA Region");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
new file mode 100644
index 0000000000..cc4861e345
--- /dev/null
+++ b/drivers/fpga/socfpga-a10.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Manager Driver for Altera Arria10 SoCFPGA
+ *
+ * Copyright (C) 2015-2016 Altera Corporation
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+
+#define A10_FPGAMGR_DCLKCNT_OFST 0x08
+#define A10_FPGAMGR_DCLKSTAT_OFST 0x0c
+#define A10_FPGAMGR_IMGCFG_CTL_00_OFST 0x70
+#define A10_FPGAMGR_IMGCFG_CTL_01_OFST 0x74
+#define A10_FPGAMGR_IMGCFG_CTL_02_OFST 0x78
+#define A10_FPGAMGR_IMGCFG_STAT_OFST 0x80
+
+#define A10_FPGAMGR_DCLKSTAT_DCLKDONE BIT(0)
+
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS BIT(1)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE BIT(2)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG BIT(8)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NSTATUS_OE BIT(16)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_CONDONE_OE BIT(24)
+
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST BIT(16)
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE BIT(24)
+
+#define A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK (BIT(16) | BIT(17))
+#define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT 16
+#define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH BIT(24)
+#define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT 24
+
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR BIT(0)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_EARLY_USERMODE BIT(1)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE BIT(2)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN BIT(4)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN BIT(6)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY BIT(9)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE BIT(10)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR BIT(11)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN BIT(12)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK (BIT(16) | BIT(17) | BIT(18))
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT 16
+
+/* FPGA CD Ratio Value */
+#define CDRATIO_x1 0x0
+#define CDRATIO_x2 0x1
+#define CDRATIO_x4 0x2
+#define CDRATIO_x8 0x3
+
+/* Configuration width 16/32 bit */
+#define CFGWDTH_32 1
+#define CFGWDTH_16 0
+
+/*
+ * struct a10_fpga_priv - private data for fpga manager
+ * @regmap: regmap for register access
+ * @fpga_data_addr: iomap for single address data register to FPGA
+ * @clk: clock
+ */
+struct a10_fpga_priv {
+ struct regmap *regmap;
+ void __iomem *fpga_data_addr;
+ struct clk *clk;
+};
+
+static bool socfpga_a10_fpga_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case A10_FPGAMGR_DCLKCNT_OFST:
+ case A10_FPGAMGR_DCLKSTAT_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_00_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_01_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_02_OFST:
+ return true;
+ }
+ return false;
+}
+
+static bool socfpga_a10_fpga_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case A10_FPGAMGR_DCLKCNT_OFST:
+ case A10_FPGAMGR_DCLKSTAT_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_00_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_01_OFST:
+ case A10_FPGAMGR_IMGCFG_CTL_02_OFST:
+ case A10_FPGAMGR_IMGCFG_STAT_OFST:
+ return true;
+ }
+ return false;
+}
+
+static const struct regmap_config socfpga_a10_fpga_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .writeable_reg = socfpga_a10_fpga_writeable_reg,
+ .readable_reg = socfpga_a10_fpga_readable_reg,
+ .max_register = A10_FPGAMGR_IMGCFG_STAT_OFST,
+ .cache_type = REGCACHE_NONE,
+};
+
+/*
+ * from the register map description of cdratio in imgcfg_ctrl_02:
+ * Normal Configuration : 32bit Passive Parallel
+ * Partial Reconfiguration : 16bit Passive Parallel
+ */
+static void socfpga_a10_fpga_set_cfg_width(struct a10_fpga_priv *priv,
+ int width)
+{
+ width <<= A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT;
+
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH, width);
+}
+
+static void socfpga_a10_fpga_generate_dclks(struct a10_fpga_priv *priv,
+ u32 count)
+{
+ u32 val;
+
+ /* Clear any existing DONE status. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST,
+ A10_FPGAMGR_DCLKSTAT_DCLKDONE);
+
+	/* Issue the DCLK count. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKCNT_OFST, count);
+
+	/* Wait until the DCLK count is done. */
+ regmap_read_poll_timeout(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST, val,
+ val, 1, 100);
+
+ /* Clear DONE status. */
+ regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST,
+ A10_FPGAMGR_DCLKSTAT_DCLKDONE);
+}
+
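+/* Word (u32) offsets of fields in the raw bitstream (RBF) header */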
+#define RBF_ENCRYPTION_MODE_OFFSET 69
+#define RBF_DECOMPRESS_OFFSET 229
+
+static int socfpga_a10_fpga_encrypted(u32 *buf32, size_t buf32_size)
+{
+ if (buf32_size < RBF_ENCRYPTION_MODE_OFFSET + 1)
+ return -EINVAL;
+
+ /* Is the bitstream encrypted? */
+ return ((buf32[RBF_ENCRYPTION_MODE_OFFSET] >> 2) & 3) != 0;
+}
+
+static int socfpga_a10_fpga_compressed(u32 *buf32, size_t buf32_size)
+{
+ if (buf32_size < RBF_DECOMPRESS_OFFSET + 1)
+ return -EINVAL;
+
+ /* Is the bitstream compressed? */
+ return !((buf32[RBF_DECOMPRESS_OFFSET] >> 1) & 1);
+}
+
+static unsigned int socfpga_a10_fpga_get_cd_ratio(unsigned int cfg_width,
+ bool encrypt, bool compress)
+{
+ unsigned int cd_ratio;
+
+ /*
+ * cd ratio is dependent on cfg width and whether the bitstream
+ * is encrypted and/or compressed.
+ *
+ * | width | encr. | compr. | cd ratio |
+ * | 16 | 0 | 0 | 1 |
+ * | 16 | 0 | 1 | 4 |
+ * | 16 | 1 | 0 | 2 |
+ * | 16 | 1 | 1 | 4 |
+ * | 32 | 0 | 0 | 1 |
+ * | 32 | 0 | 1 | 8 |
+ * | 32 | 1 | 0 | 4 |
+ * | 32 | 1 | 1 | 8 |
+ */
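+	/*
+	 * e.g. a compressed 32-bit bitstream: start from CDRATIO_x4 for
+	 * compression, then increment once for 32-bit width => CDRATIO_x8.
+	 */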
+ if (!compress && !encrypt)
+ return CDRATIO_x1;
+
+ if (compress)
+ cd_ratio = CDRATIO_x4;
+ else
+ cd_ratio = CDRATIO_x2;
+
+ /* If 32 bit, double the cd ratio by incrementing the field */
+ if (cfg_width == CFGWDTH_32)
+ cd_ratio += 1;
+
+ return cd_ratio;
+}
+
+static int socfpga_a10_fpga_set_cdratio(struct fpga_manager *mgr,
+ unsigned int cfg_width,
+ const char *buf, size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ unsigned int cd_ratio;
+ int encrypt, compress;
+
+ encrypt = socfpga_a10_fpga_encrypted((u32 *)buf, count / 4);
+ if (encrypt < 0)
+ return -EINVAL;
+
+ compress = socfpga_a10_fpga_compressed((u32 *)buf, count / 4);
+ if (compress < 0)
+ return -EINVAL;
+
+ cd_ratio = socfpga_a10_fpga_get_cd_ratio(cfg_width, encrypt, compress);
+
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK,
+ cd_ratio << A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT);
+
+ return 0;
+}
+
+static u32 socfpga_a10_fpga_read_stat(struct a10_fpga_priv *priv)
+{
+ u32 val;
+
+ regmap_read(priv->regmap, A10_FPGAMGR_IMGCFG_STAT_OFST, &val);
+
+ return val;
+}
+
+static int socfpga_a10_fpga_wait_for_pr_ready(struct a10_fpga_priv *priv)
+{
+ u32 reg, i;
+
+ for (i = 0; i < 10 ; i++) {
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR)
+ return -EINVAL;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int socfpga_a10_fpga_wait_for_pr_done(struct a10_fpga_priv *priv)
+{
+ u32 reg, i;
+
+ for (i = 0; i < 10 ; i++) {
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR)
+ return -EINVAL;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+/* Start the FPGA programming by initializing the FPGA Manager */
+static int socfpga_a10_fpga_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ unsigned int cfg_width;
+ u32 msel, stat, mask;
+ int ret;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG)
+ cfg_width = CFGWDTH_16;
+ else
+ return -EINVAL;
+
+ /* Check for passive parallel (msel == 000 or 001) */
+ msel = socfpga_a10_fpga_read_stat(priv);
+ msel &= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK;
+ msel >>= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT;
+ if ((msel != 0) && (msel != 1)) {
+ dev_dbg(&mgr->dev, "Fail: invalid msel=%d\n", msel);
+ return -EINVAL;
+ }
+
+ /* Make sure no external devices are interfering */
+ stat = socfpga_a10_fpga_read_stat(priv);
+ mask = A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN |
+ A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN;
+ if ((stat & mask) != mask)
+ return -EINVAL;
+
+ /* Set cfg width */
+ socfpga_a10_fpga_set_cfg_width(priv, cfg_width);
+
+ /* Determine cd ratio from bitstream header and set cd ratio */
+ ret = socfpga_a10_fpga_set_cdratio(mgr, cfg_width, buf, count);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear s2f_nce to enable chip select. Leave pr_request
+ * unasserted and override disabled.
+ */
+ regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG);
+
+ /* Set cfg_ctrl to enable s2f dclk and data */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL);
+
+ /*
+ * Disable overrides not needed for pr.
+	 * s2f_config==1 leaves reset deasserted.
+ */
+ regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_00_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE |
+ A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG);
+
+ /* Enable override for data, dclk, nce, and pr_request to CSS */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG, 0);
+
+ /* Send some clocks to clear out any errors */
+ socfpga_a10_fpga_generate_dclks(priv, 256);
+
+ /* Assert pr_request */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST);
+
+ /* Provide 2048 DCLKs before starting the config data streaming. */
+ socfpga_a10_fpga_generate_dclks(priv, 0x7ff);
+
+ /* Wait for pr_ready */
+ return socfpga_a10_fpga_wait_for_pr_ready(priv);
+}
+
+/*
+ * write data to the FPGA data register
+ */
+static int socfpga_a10_fpga_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 *buffer_32 = (u32 *)buf;
+ size_t i = 0;
+
+	if (!count)
+ return -EINVAL;
+
+ /* Write out the complete 32-bit chunks */
+ while (count >= sizeof(u32)) {
+ writel(buffer_32[i++], priv->fpga_data_addr);
+ count -= sizeof(u32);
+ }
+
+ /* Write out remaining non 32-bit chunks */
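+	/* (mask off bytes past the end of the image so stale buffer data is not written) */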
+ switch (count) {
+ case 3:
+ writel(buffer_32[i++] & 0x00ffffff, priv->fpga_data_addr);
+ break;
+ case 2:
+ writel(buffer_32[i++] & 0x0000ffff, priv->fpga_data_addr);
+ break;
+ case 1:
+ writel(buffer_32[i++] & 0x000000ff, priv->fpga_data_addr);
+ break;
+ case 0:
+ break;
+ default:
+ /* This will never happen */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int socfpga_a10_fpga_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 reg;
+ int ret;
+
+ /* Wait for pr_done */
+ ret = socfpga_a10_fpga_wait_for_pr_done(priv);
+
+ /* Clear pr_request */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST, 0);
+
+ /* Send some clocks to clear out any errors */
+ socfpga_a10_fpga_generate_dclks(priv, 256);
+
+ /* Disable s2f dclk and data */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL, 0);
+
+ /* Deassert chip select */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE);
+
+ /* Disable data, dclk, nce, and pr_request override to CSS */
+ regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG,
+ A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG);
+
+ /* Return any errors regarding pr_done or pr_error */
+ if (ret)
+ return ret;
+
+ /* Final check */
+ reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE) == 0) ||
+ ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN) == 0) ||
+ ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)) {
+ dev_dbg(&mgr->dev,
+			"Timeout in final check. Status=%08x\n", reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static enum fpga_mgr_states socfpga_a10_fpga_state(struct fpga_manager *mgr)
+{
+ struct a10_fpga_priv *priv = mgr->priv;
+ u32 reg = socfpga_a10_fpga_read_stat(priv);
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE)
+ return FPGA_MGR_STATE_OPERATING;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY)
+ return FPGA_MGR_STATE_WRITE;
+
+ if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR)
+ return FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
+
+ if ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static const struct fpga_manager_ops socfpga_a10_fpga_mgr_ops = {
+ .initial_header_size = (RBF_DECOMPRESS_OFFSET + 1) * 4,
+ .state = socfpga_a10_fpga_state,
+ .write_init = socfpga_a10_fpga_write_init,
+ .write = socfpga_a10_fpga_write,
+ .write_complete = socfpga_a10_fpga_write_complete,
+};
+
+static int socfpga_a10_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct a10_fpga_priv *priv;
+ void __iomem *reg_base;
+ struct fpga_manager *mgr;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* First mmio base is for register access */
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ /* Second mmio base is for writing FPGA image data */
+ priv->fpga_data_addr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->fpga_data_addr))
+ return PTR_ERR(priv->fpga_data_addr);
+
+ /* regmap for register access */
+ priv->regmap = devm_regmap_init_mmio(dev, reg_base,
+ &socfpga_a10_fpga_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return -ENODEV;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "no clock specified\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "could not enable clock\n");
+ return -EBUSY;
+ }
+
+ mgr = fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
+ &socfpga_a10_fpga_mgr_ops, priv);
+ if (IS_ERR(mgr)) {
+ clk_disable_unprepare(priv->clk);
+ return PTR_ERR(mgr);
+ }
+
+ platform_set_drvdata(pdev, mgr);
+
+ return 0;
+}
+
+static int socfpga_a10_fpga_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+ struct a10_fpga_priv *priv = mgr->priv;
+
+ fpga_mgr_unregister(mgr);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id socfpga_a10_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-a10-fpga-mgr", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match);
+
+static struct platform_driver socfpga_a10_fpga_driver = {
+ .probe = socfpga_a10_fpga_probe,
+ .remove = socfpga_a10_fpga_remove,
+ .driver = {
+ .name = "socfpga_a10_fpga_manager",
+ .of_match_table = socfpga_a10_fpga_of_match,
+ },
+};
+
+module_platform_driver(socfpga_a10_fpga_driver);
+
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_DESCRIPTION("SoCFPGA Arria10 FPGA Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
new file mode 100644
index 0000000000..723ea0ad3f
--- /dev/null
+++ b/drivers/fpga/socfpga.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Manager Driver for Altera SOCFPGA
+ *
+ * Copyright (C) 2013-2015 Altera Corporation
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pm.h>
+
+/* Register offsets */
+#define SOCFPGA_FPGMGR_STAT_OFST 0x0
+#define SOCFPGA_FPGMGR_CTL_OFST 0x4
+#define SOCFPGA_FPGMGR_DCLKCNT_OFST 0x8
+#define SOCFPGA_FPGMGR_DCLKSTAT_OFST 0xc
+#define SOCFPGA_FPGMGR_GPIO_INTEN_OFST 0x830
+#define SOCFPGA_FPGMGR_GPIO_INTMSK_OFST 0x834
+#define SOCFPGA_FPGMGR_GPIO_INTTYPE_LEVEL_OFST 0x838
+#define SOCFPGA_FPGMGR_GPIO_INT_POL_OFST 0x83c
+#define SOCFPGA_FPGMGR_GPIO_INTSTAT_OFST 0x840
+#define SOCFPGA_FPGMGR_GPIO_RAW_INTSTAT_OFST 0x844
+#define SOCFPGA_FPGMGR_GPIO_PORTA_EOI_OFST 0x84c
+#define SOCFPGA_FPGMGR_GPIO_EXT_PORTA_OFST 0x850
+
+/* Register bit defines */
+/* SOCFPGA_FPGMGR_STAT register mode field values */
+#define SOCFPGA_FPGMGR_STAT_POWER_UP 0x0 /*ramping*/
+#define SOCFPGA_FPGMGR_STAT_RESET 0x1
+#define SOCFPGA_FPGMGR_STAT_CFG 0x2
+#define SOCFPGA_FPGMGR_STAT_INIT 0x3
+#define SOCFPGA_FPGMGR_STAT_USER_MODE 0x4
+#define SOCFPGA_FPGMGR_STAT_UNKNOWN 0x5
+#define SOCFPGA_FPGMGR_STAT_STATE_MASK 0x7
+/* This is a flag value that doesn't really happen in this register field */
+#define SOCFPGA_FPGMGR_STAT_POWER_OFF 0x0
+
+#define MSEL_PP16_FAST_NOAES_NODC 0x0
+#define MSEL_PP16_FAST_AES_NODC 0x1
+#define MSEL_PP16_FAST_AESOPT_DC 0x2
+#define MSEL_PP16_SLOW_NOAES_NODC 0x4
+#define MSEL_PP16_SLOW_AES_NODC 0x5
+#define MSEL_PP16_SLOW_AESOPT_DC 0x6
+#define MSEL_PP32_FAST_NOAES_NODC 0x8
+#define MSEL_PP32_FAST_AES_NODC 0x9
+#define MSEL_PP32_FAST_AESOPT_DC 0xa
+#define MSEL_PP32_SLOW_NOAES_NODC 0xc
+#define MSEL_PP32_SLOW_AES_NODC 0xd
+#define MSEL_PP32_SLOW_AESOPT_DC 0xe
+#define SOCFPGA_FPGMGR_STAT_MSEL_MASK 0x000000f8
+#define SOCFPGA_FPGMGR_STAT_MSEL_SHIFT 3
+
+/* SOCFPGA_FPGMGR_CTL register */
+#define SOCFPGA_FPGMGR_CTL_EN 0x00000001
+#define SOCFPGA_FPGMGR_CTL_NCE 0x00000002
+#define SOCFPGA_FPGMGR_CTL_NCFGPULL 0x00000004
+
+#define CDRATIO_X1 0x00000000
+#define CDRATIO_X2 0x00000040
+#define CDRATIO_X4 0x00000080
+#define CDRATIO_X8 0x000000c0
+#define SOCFPGA_FPGMGR_CTL_CDRATIO_MASK 0x000000c0
+
+#define SOCFPGA_FPGMGR_CTL_AXICFGEN 0x00000100
+
+#define CFGWDTH_16 0x00000000
+#define CFGWDTH_32 0x00000200
+#define SOCFPGA_FPGMGR_CTL_CFGWDTH_MASK 0x00000200
+
+/* SOCFPGA_FPGMGR_DCLKSTAT register */
+#define SOCFPGA_FPGMGR_DCLKSTAT_DCNTDONE_E_DONE 0x1
+
+/* SOCFPGA_FPGMGR_GPIO_* registers share the same bit positions */
+#define SOCFPGA_FPGMGR_MON_NSTATUS 0x0001
+#define SOCFPGA_FPGMGR_MON_CONF_DONE 0x0002
+#define SOCFPGA_FPGMGR_MON_INIT_DONE 0x0004
+#define SOCFPGA_FPGMGR_MON_CRC_ERROR 0x0008
+#define SOCFPGA_FPGMGR_MON_CVP_CONF_DONE 0x0010
+#define SOCFPGA_FPGMGR_MON_PR_READY 0x0020
+#define SOCFPGA_FPGMGR_MON_PR_ERROR 0x0040
+#define SOCFPGA_FPGMGR_MON_PR_DONE 0x0080
+#define SOCFPGA_FPGMGR_MON_NCONFIG_PIN 0x0100
+#define SOCFPGA_FPGMGR_MON_NSTATUS_PIN 0x0200
+#define SOCFPGA_FPGMGR_MON_CONF_DONE_PIN 0x0400
+#define SOCFPGA_FPGMGR_MON_FPGA_POWER_ON 0x0800
+#define SOCFPGA_FPGMGR_MON_STATUS_MASK 0x0fff
+
+#define SOCFPGA_FPGMGR_NUM_SUPPLIES 3
+#define SOCFPGA_RESUME_TIMEOUT 3
+
+/* In power-up order. Reverse for power-down. */
+static const char *supply_names[SOCFPGA_FPGMGR_NUM_SUPPLIES] __maybe_unused = {
+ "FPGA-1.5V",
+ "FPGA-1.1V",
+ "FPGA-2.5V",
+};
+
+struct socfpga_fpga_priv {
+ void __iomem *fpga_base_addr;
+ void __iomem *fpga_data_addr;
+ struct completion status_complete;
+ int irq;
+};
+
+struct cfgmgr_mode {
+ /* Values to set in the CTRL register */
+ u32 ctrl;
+
+ /* flag that this table entry is a valid mode */
+ bool valid;
+};
+
+/* For SOCFPGA_FPGMGR_STAT_MSEL field */
+static struct cfgmgr_mode cfgmgr_modes[] = {
+ [MSEL_PP16_FAST_NOAES_NODC] = { CFGWDTH_16 | CDRATIO_X1, 1 },
+ [MSEL_PP16_FAST_AES_NODC] = { CFGWDTH_16 | CDRATIO_X2, 1 },
+ [MSEL_PP16_FAST_AESOPT_DC] = { CFGWDTH_16 | CDRATIO_X4, 1 },
+ [MSEL_PP16_SLOW_NOAES_NODC] = { CFGWDTH_16 | CDRATIO_X1, 1 },
+ [MSEL_PP16_SLOW_AES_NODC] = { CFGWDTH_16 | CDRATIO_X2, 1 },
+ [MSEL_PP16_SLOW_AESOPT_DC] = { CFGWDTH_16 | CDRATIO_X4, 1 },
+ [MSEL_PP32_FAST_NOAES_NODC] = { CFGWDTH_32 | CDRATIO_X1, 1 },
+ [MSEL_PP32_FAST_AES_NODC] = { CFGWDTH_32 | CDRATIO_X4, 1 },
+ [MSEL_PP32_FAST_AESOPT_DC] = { CFGWDTH_32 | CDRATIO_X8, 1 },
+ [MSEL_PP32_SLOW_NOAES_NODC] = { CFGWDTH_32 | CDRATIO_X1, 1 },
+ [MSEL_PP32_SLOW_AES_NODC] = { CFGWDTH_32 | CDRATIO_X4, 1 },
+ [MSEL_PP32_SLOW_AESOPT_DC] = { CFGWDTH_32 | CDRATIO_X8, 1 },
+};
+
+static u32 socfpga_fpga_readl(struct socfpga_fpga_priv *priv, u32 reg_offset)
+{
+ return readl(priv->fpga_base_addr + reg_offset);
+}
+
+static void socfpga_fpga_writel(struct socfpga_fpga_priv *priv, u32 reg_offset,
+ u32 value)
+{
+ writel(value, priv->fpga_base_addr + reg_offset);
+}
+
+static u32 socfpga_fpga_raw_readl(struct socfpga_fpga_priv *priv,
+ u32 reg_offset)
+{
+ return __raw_readl(priv->fpga_base_addr + reg_offset);
+}
+
+static void socfpga_fpga_raw_writel(struct socfpga_fpga_priv *priv,
+ u32 reg_offset, u32 value)
+{
+ __raw_writel(value, priv->fpga_base_addr + reg_offset);
+}
+
+static void socfpga_fpga_data_writel(struct socfpga_fpga_priv *priv, u32 value)
+{
+ writel(value, priv->fpga_data_addr);
+}
+
+static inline void socfpga_fpga_set_bitsl(struct socfpga_fpga_priv *priv,
+ u32 offset, u32 bits)
+{
+ u32 val;
+
+ val = socfpga_fpga_readl(priv, offset);
+ val |= bits;
+ socfpga_fpga_writel(priv, offset, val);
+}
+
+static inline void socfpga_fpga_clr_bitsl(struct socfpga_fpga_priv *priv,
+ u32 offset, u32 bits)
+{
+ u32 val;
+
+ val = socfpga_fpga_readl(priv, offset);
+ val &= ~bits;
+ socfpga_fpga_writel(priv, offset, val);
+}
+
+static u32 socfpga_fpga_mon_status_get(struct socfpga_fpga_priv *priv)
+{
+ return socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_GPIO_EXT_PORTA_OFST) &
+ SOCFPGA_FPGMGR_MON_STATUS_MASK;
+}
+
+static u32 socfpga_fpga_state_get(struct socfpga_fpga_priv *priv)
+{
+ u32 status = socfpga_fpga_mon_status_get(priv);
+
+ if ((status & SOCFPGA_FPGMGR_MON_FPGA_POWER_ON) == 0)
+ return SOCFPGA_FPGMGR_STAT_POWER_OFF;
+
+ return socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_STAT_OFST) &
+ SOCFPGA_FPGMGR_STAT_STATE_MASK;
+}
+
+static void socfpga_fpga_clear_done_status(struct socfpga_fpga_priv *priv)
+{
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_DCLKSTAT_OFST,
+ SOCFPGA_FPGMGR_DCLKSTAT_DCNTDONE_E_DONE);
+}
+
+/*
+ * Set the DCLKCNT, wait for DCLKSTAT to report the count completed, and clear
+ * the complete status.
+ */
+static int socfpga_fpga_dclk_set_and_wait_clear(struct socfpga_fpga_priv *priv,
+ u32 count)
+{
+ int timeout = 2;
+ u32 done;
+
+ /* Clear any existing DONE status. */
+ if (socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_DCLKSTAT_OFST))
+ socfpga_fpga_clear_done_status(priv);
+
+ /* Issue the DCLK count. */
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_DCLKCNT_OFST, count);
+
+ /* Poll DCLKSTAT to see if it completed in the timeout period. */
+ do {
+ done = socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_DCLKSTAT_OFST);
+ if (done == SOCFPGA_FPGMGR_DCLKSTAT_DCNTDONE_E_DONE) {
+ socfpga_fpga_clear_done_status(priv);
+ return 0;
+ }
+ udelay(1);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static int socfpga_fpga_wait_for_state(struct socfpga_fpga_priv *priv,
+ u32 state)
+{
+ int timeout = 2;
+
+ /*
+ * HW doesn't support an interrupt for changes in state, so poll to see
+ * if it matches the requested state within the timeout period.
+ */
+ do {
+ if ((socfpga_fpga_state_get(priv) & state) != 0)
+ return 0;
+ msleep(20);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static void socfpga_fpga_enable_irqs(struct socfpga_fpga_priv *priv, u32 irqs)
+{
+ /* set irqs to level sensitive */
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_INTTYPE_LEVEL_OFST, 0);
+
+ /* set interrupt polarity */
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_INT_POL_OFST, irqs);
+
+ /* clear irqs */
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_PORTA_EOI_OFST, irqs);
+
+ /* unmask interrupts */
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_INTMSK_OFST, 0);
+
+ /* enable interrupts */
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_INTEN_OFST, irqs);
+}
+
+static void socfpga_fpga_disable_irqs(struct socfpga_fpga_priv *priv)
+{
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_INTEN_OFST, 0);
+}
+
+static irqreturn_t socfpga_fpga_isr(int irq, void *dev_id)
+{
+ struct socfpga_fpga_priv *priv = dev_id;
+ u32 irqs, st;
+ bool conf_done, nstatus;
+
+ /* clear irqs */
+ irqs = socfpga_fpga_raw_readl(priv, SOCFPGA_FPGMGR_GPIO_INTSTAT_OFST);
+
+ socfpga_fpga_raw_writel(priv, SOCFPGA_FPGMGR_GPIO_PORTA_EOI_OFST, irqs);
+
+ st = socfpga_fpga_raw_readl(priv, SOCFPGA_FPGMGR_GPIO_EXT_PORTA_OFST);
+ conf_done = (st & SOCFPGA_FPGMGR_MON_CONF_DONE) != 0;
+ nstatus = (st & SOCFPGA_FPGMGR_MON_NSTATUS) != 0;
+
+ /* success */
+ if (conf_done && nstatus) {
+ /* disable irqs */
+ socfpga_fpga_raw_writel(priv,
+ SOCFPGA_FPGMGR_GPIO_INTEN_OFST, 0);
+ complete(&priv->status_complete);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int socfpga_fpga_wait_for_config_done(struct socfpga_fpga_priv *priv)
+{
+ int timeout, ret = 0;
+
+ socfpga_fpga_disable_irqs(priv);
+ init_completion(&priv->status_complete);
+ socfpga_fpga_enable_irqs(priv, SOCFPGA_FPGMGR_MON_CONF_DONE);
+
+ timeout = wait_for_completion_interruptible_timeout(
+ &priv->status_complete,
+ msecs_to_jiffies(10));
+ if (timeout == 0)
+ ret = -ETIMEDOUT;
+
+ socfpga_fpga_disable_irqs(priv);
+ return ret;
+}
+
+static int socfpga_fpga_cfg_mode_get(struct socfpga_fpga_priv *priv)
+{
+ u32 msel;
+
+ msel = socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_STAT_OFST);
+ msel &= SOCFPGA_FPGMGR_STAT_MSEL_MASK;
+ msel >>= SOCFPGA_FPGMGR_STAT_MSEL_SHIFT;
+
+ /* Check that this MSEL setting is supported */
+ if ((msel >= ARRAY_SIZE(cfgmgr_modes)) || !cfgmgr_modes[msel].valid)
+ return -EINVAL;
+
+ return msel;
+}
+
+static int socfpga_fpga_cfg_mode_set(struct socfpga_fpga_priv *priv)
+{
+ u32 ctrl_reg;
+ int mode;
+
+ /* get value from MSEL pins */
+ mode = socfpga_fpga_cfg_mode_get(priv);
+ if (mode < 0)
+ return mode;
+
+ /* Adjust CTRL for the CDRATIO */
+ ctrl_reg = socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_CTL_OFST);
+ ctrl_reg &= ~SOCFPGA_FPGMGR_CTL_CDRATIO_MASK;
+ ctrl_reg &= ~SOCFPGA_FPGMGR_CTL_CFGWDTH_MASK;
+ ctrl_reg |= cfgmgr_modes[mode].ctrl;
+
+ /* Set NCE to 0. */
+ ctrl_reg &= ~SOCFPGA_FPGMGR_CTL_NCE;
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_CTL_OFST, ctrl_reg);
+
+ return 0;
+}
+
+static int socfpga_fpga_reset(struct fpga_manager *mgr)
+{
+ struct socfpga_fpga_priv *priv = mgr->priv;
+ u32 ctrl_reg, status;
+ int ret;
+
+ /*
+ * Step 1:
+ * - Set CTRL.CFGWDTH, CTRL.CDRATIO to match cfg mode
+ * - Set CTRL.NCE to 0
+ */
+ ret = socfpga_fpga_cfg_mode_set(priv);
+ if (ret)
+ return ret;
+
+ /* Step 2: Set CTRL.EN to 1 */
+ socfpga_fpga_set_bitsl(priv, SOCFPGA_FPGMGR_CTL_OFST,
+ SOCFPGA_FPGMGR_CTL_EN);
+
+ /* Step 3: Set CTRL.NCONFIGPULL to 1 to put FPGA in reset */
+ ctrl_reg = socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_CTL_OFST);
+ ctrl_reg |= SOCFPGA_FPGMGR_CTL_NCFGPULL;
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_CTL_OFST, ctrl_reg);
+
+ /* Step 4: Wait for STATUS.MODE to report FPGA is in reset phase */
+ status = socfpga_fpga_wait_for_state(priv, SOCFPGA_FPGMGR_STAT_RESET);
+
+ /* Step 5: Set CONTROL.NCONFIGPULL to 0 to release FPGA from reset */
+ ctrl_reg &= ~SOCFPGA_FPGMGR_CTL_NCFGPULL;
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_CTL_OFST, ctrl_reg);
+
+ /* Timeout waiting for reset */
+ if (status)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/*
+ * Prepare the FPGA to receive the configuration data.
+ */
+static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct socfpga_fpga_priv *priv = mgr->priv;
+ int ret;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ return -EINVAL;
+ }
+ /* Steps 1 - 5: Reset the FPGA */
+ ret = socfpga_fpga_reset(mgr);
+ if (ret)
+ return ret;
+
+ /* Step 6: Wait for FPGA to enter configuration phase */
+ if (socfpga_fpga_wait_for_state(priv, SOCFPGA_FPGMGR_STAT_CFG))
+ return -ETIMEDOUT;
+
+ /* Step 7: Clear nSTATUS interrupt */
+ socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_PORTA_EOI_OFST,
+ SOCFPGA_FPGMGR_MON_NSTATUS);
+
+ /* Step 8: Set CTRL.AXICFGEN to 1 to enable transfer of config data */
+ socfpga_fpga_set_bitsl(priv, SOCFPGA_FPGMGR_CTL_OFST,
+ SOCFPGA_FPGMGR_CTL_AXICFGEN);
+
+ return 0;
+}
+
+/*
+ * Step 9: write data to the FPGA data register
+ */
+static int socfpga_fpga_ops_configure_write(struct fpga_manager *mgr,
+ const char *buf, size_t count)
+{
+ struct socfpga_fpga_priv *priv = mgr->priv;
+ u32 *buffer_32 = (u32 *)buf;
+ size_t i = 0;
+
+ if (count <= 0)
+ return -EINVAL;
+
+ /* Write out the complete 32-bit chunks. */
+ while (count >= sizeof(u32)) {
+ socfpga_fpga_data_writel(priv, buffer_32[i++]);
+ count -= sizeof(u32);
+ }
+
+ /* Write out remaining non 32-bit chunks. */
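+	/*
+	 * The masking below assumes a little-endian CPU (true for the
+	 * Cortex-A9 HPS on SoCFPGA): the remaining image bytes sit in the
+	 * low-order bytes of the final u32, so the masked-off high bytes
+	 * are padding that was never part of the image.
+	 */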
+ switch (count) {
+ case 3:
+ socfpga_fpga_data_writel(priv, buffer_32[i++] & 0x00ffffff);
+ break;
+ case 2:
+ socfpga_fpga_data_writel(priv, buffer_32[i++] & 0x0000ffff);
+ break;
+ case 1:
+ socfpga_fpga_data_writel(priv, buffer_32[i++] & 0x000000ff);
+ break;
+ case 0:
+ break;
+ default:
+ /* This will never happen. */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int socfpga_fpga_ops_configure_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct socfpga_fpga_priv *priv = mgr->priv;
+ u32 status;
+
+ /*
+ * Step 10:
+ * - Observe CONF_DONE and nSTATUS (active low)
+ * - if CONF_DONE = 1 and nSTATUS = 1, configuration was successful
+ * - if CONF_DONE = 0 and nSTATUS = 0, configuration failed
+ */
+ status = socfpga_fpga_wait_for_config_done(priv);
+ if (status)
+ return status;
+
+ /* Step 11: Clear CTRL.AXICFGEN to disable transfer of config data */
+ socfpga_fpga_clr_bitsl(priv, SOCFPGA_FPGMGR_CTL_OFST,
+ SOCFPGA_FPGMGR_CTL_AXICFGEN);
+
+ /*
+ * Step 12:
+ * - Write 4 to DCLKCNT
+ * - Wait for STATUS.DCNTDONE = 1
+ * - Clear W1C bit in STATUS.DCNTDONE
+ */
+ if (socfpga_fpga_dclk_set_and_wait_clear(priv, 4))
+ return -ETIMEDOUT;
+
+ /* Step 13: Wait for STATUS.MODE to report USER MODE */
+ if (socfpga_fpga_wait_for_state(priv, SOCFPGA_FPGMGR_STAT_USER_MODE))
+ return -ETIMEDOUT;
+
+ /* Step 14: Set CTRL.EN to 0 */
+ socfpga_fpga_clr_bitsl(priv, SOCFPGA_FPGMGR_CTL_OFST,
+ SOCFPGA_FPGMGR_CTL_EN);
+
+ return 0;
+}
+
+/* Translate state register values to FPGA framework state */
+static const enum fpga_mgr_states socfpga_state_to_framework_state[] = {
+ [SOCFPGA_FPGMGR_STAT_POWER_OFF] = FPGA_MGR_STATE_POWER_OFF,
+ [SOCFPGA_FPGMGR_STAT_RESET] = FPGA_MGR_STATE_RESET,
+ [SOCFPGA_FPGMGR_STAT_CFG] = FPGA_MGR_STATE_WRITE_INIT,
+ [SOCFPGA_FPGMGR_STAT_INIT] = FPGA_MGR_STATE_WRITE_INIT,
+ [SOCFPGA_FPGMGR_STAT_USER_MODE] = FPGA_MGR_STATE_OPERATING,
+ [SOCFPGA_FPGMGR_STAT_UNKNOWN] = FPGA_MGR_STATE_UNKNOWN,
+};
+
+static enum fpga_mgr_states socfpga_fpga_ops_state(struct fpga_manager *mgr)
+{
+ struct socfpga_fpga_priv *priv = mgr->priv;
+ enum fpga_mgr_states ret;
+ u32 state;
+
+ state = socfpga_fpga_state_get(priv);
+
+ if (state < ARRAY_SIZE(socfpga_state_to_framework_state))
+ ret = socfpga_state_to_framework_state[state];
+ else
+ ret = FPGA_MGR_STATE_UNKNOWN;
+
+ return ret;
+}
+
+static const struct fpga_manager_ops socfpga_fpga_ops = {
+ .state = socfpga_fpga_ops_state,
+ .write_init = socfpga_fpga_ops_configure_init,
+ .write = socfpga_fpga_ops_configure_write,
+ .write_complete = socfpga_fpga_ops_configure_complete,
+};
+
+static int socfpga_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct socfpga_fpga_priv *priv;
+ struct fpga_manager *mgr;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->fpga_base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->fpga_base_addr))
+ return PTR_ERR(priv->fpga_base_addr);
+
+ priv->fpga_data_addr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->fpga_data_addr))
+ return PTR_ERR(priv->fpga_data_addr);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ ret = devm_request_irq(dev, priv->irq, socfpga_fpga_isr, 0,
+ dev_name(dev), priv);
+ if (ret)
+ return ret;
+
+ mgr = devm_fpga_mgr_register(dev, "Altera SOCFPGA FPGA Manager",
+ &socfpga_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id socfpga_fpga_of_match[] = {
+ { .compatible = "altr,socfpga-fpga-mgr", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, socfpga_fpga_of_match);
+#endif
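+
+/*
+ * Minimal device tree sketch for this manager. Addresses and interrupt
+ * cells are illustrative only; the second reg window is the configuration
+ * data register written by socfpga_fpga_data_writel():
+ *
+ *	fpgamgr0: fpgamgr@ff706000 {
+ *		compatible = "altr,socfpga-fpga-mgr";
+ *		reg = <0xff706000 0x1000>,
+ *		      <0xffb90000 0x1000>;
+ *		interrupts = <0 175 4>;
+ *	};
+ */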
+
+static struct platform_driver socfpga_fpga_driver = {
+ .probe = socfpga_fpga_probe,
+ .driver = {
+ .name = "socfpga_fpga_manager",
+ .of_match_table = of_match_ptr(socfpga_fpga_of_match),
+ },
+};
+
+module_platform_driver(socfpga_fpga_driver);
+
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_DESCRIPTION("Altera SOCFPGA FPGA Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
new file mode 100644
index 0000000000..cacb9cc575
--- /dev/null
+++ b/drivers/fpga/stratix10-soc.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Manager Driver for Intel Stratix10 SoC
+ *
+ * Copyright (C) 2018 Intel Corporation
+ */
+#include <linux/completion.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/firmware/intel/stratix10-svc-client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/*
+ * FPGA programming requires a higher level of privilege (EL3), per the SoC
+ * design.
+ */
+#define NUM_SVC_BUFS 4
+#define SVC_BUF_SIZE SZ_512K
+
+/* Indicates buffer is in use if set */
+#define SVC_BUF_LOCK 0
+
+#define S10_BUFFER_TIMEOUT (msecs_to_jiffies(SVC_RECONFIG_BUFFER_TIMEOUT_MS))
+#define S10_RECONFIG_TIMEOUT (msecs_to_jiffies(SVC_RECONFIG_REQUEST_TIMEOUT_MS))
+
+/*
+ * struct s10_svc_buf
+ * buf: virtual address of buf provided by service layer
+ * lock: locked if buffer is in use
+ */
+struct s10_svc_buf {
+ char *buf;
+ unsigned long lock;
+};
+
+struct s10_priv {
+ struct stratix10_svc_chan *chan;
+ struct stratix10_svc_client client;
+ struct completion status_return_completion;
+ struct s10_svc_buf svc_bufs[NUM_SVC_BUFS];
+ unsigned long status;
+};
+
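+/*
+ * Send one command to the privileged service layer. The exchange is
+ * asynchronous: the result arrives later through s10_receive_callback(),
+ * which records status bits in priv->status and completes
+ * status_return_completion.
+ */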
+static int s10_svc_send_msg(struct s10_priv *priv,
+ enum stratix10_svc_command_code command,
+ void *payload, u32 payload_length)
+{
+ struct stratix10_svc_chan *chan = priv->chan;
+ struct device *dev = priv->client.dev;
+ struct stratix10_svc_client_msg msg;
+ int ret;
+
+ dev_dbg(dev, "%s cmd=%d payload=%p length=%d\n",
+ __func__, command, payload, payload_length);
+
+ msg.command = command;
+ msg.payload = payload;
+ msg.payload_length = payload_length;
+
+ ret = stratix10_svc_send(chan, &msg);
+ dev_dbg(dev, "stratix10_svc_send returned status %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * Free buffers allocated from the service layer's pool that are not in use.
+ * Return true when all buffers are freed.
+ */
+static bool s10_free_buffers(struct fpga_manager *mgr)
+{
+ struct s10_priv *priv = mgr->priv;
+ uint num_free = 0;
+ uint i;
+
+ for (i = 0; i < NUM_SVC_BUFS; i++) {
+ if (!priv->svc_bufs[i].buf) {
+ num_free++;
+ continue;
+ }
+
+ if (!test_and_set_bit_lock(SVC_BUF_LOCK,
+ &priv->svc_bufs[i].lock)) {
+ stratix10_svc_free_memory(priv->chan,
+ priv->svc_bufs[i].buf);
+ priv->svc_bufs[i].buf = NULL;
+ num_free++;
+ }
+ }
+
+ return num_free == NUM_SVC_BUFS;
+}
+
+/*
+ * Returns count of how many buffers are not in use.
+ */
+static uint s10_free_buffer_count(struct fpga_manager *mgr)
+{
+ struct s10_priv *priv = mgr->priv;
+ uint num_free = 0;
+ uint i;
+
+ for (i = 0; i < NUM_SVC_BUFS; i++)
+ if (!priv->svc_bufs[i].buf)
+ num_free++;
+
+ return num_free;
+}
+
+/*
+ * s10_unlock_bufs
+ * Given the returned buffer address, match that address to our buffer struct
+ * and unlock that buffer. This marks it as available to be refilled and sent
+ * (or freed).
+ * priv: private data
+ * kaddr: kernel address of buffer that was returned from service layer
+ */
+static void s10_unlock_bufs(struct s10_priv *priv, void *kaddr)
+{
+ uint i;
+
+ if (!kaddr)
+ return;
+
+ for (i = 0; i < NUM_SVC_BUFS; i++)
+ if (priv->svc_bufs[i].buf == kaddr) {
+ clear_bit_unlock(SVC_BUF_LOCK,
+ &priv->svc_bufs[i].lock);
+ return;
+ }
+
+ WARN(1, "Unknown buffer returned from service layer %p\n", kaddr);
+}
+
+/*
+ * s10_receive_callback - callback used by the service layer to deliver
+ * messages received through the mailbox to this client (driver).
+ * client: service layer client struct
+ * data: message from service layer
+ */
+static void s10_receive_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct s10_priv *priv = client->priv;
+ u32 status;
+ int i;
+
+	WARN_ONCE(!data, "%s: stratix10_svc_cb_data = NULL", __func__);
+
+ status = data->status;
+
+ /*
+ * Here we set status bits as we receive them. Elsewhere, we always use
+ * test_and_clear_bit() to check status in priv->status.
+ */
+ for (i = 0; i <= SVC_STATUS_ERROR; i++)
+ if (status & (1 << i))
+ set_bit(i, &priv->status);
+
+ if (status & BIT(SVC_STATUS_BUFFER_DONE)) {
+ s10_unlock_bufs(priv, data->kaddr1);
+ s10_unlock_bufs(priv, data->kaddr2);
+ s10_unlock_bufs(priv, data->kaddr3);
+ }
+
+ complete(&priv->status_return_completion);
+}
+
+/*
+ * s10_ops_write_init - prepare for FPGA reconfiguration by requesting
+ * partial reconfig and allocating buffers from the service layer.
+ */
+static int s10_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct s10_priv *priv = mgr->priv;
+ struct device *dev = priv->client.dev;
+ struct stratix10_svc_command_config_type ctype;
+ char *kbuf;
+ uint i;
+ int ret;
+
+ ctype.flags = 0;
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_dbg(dev, "Requesting partial reconfiguration.\n");
+ ctype.flags |= BIT(COMMAND_RECONFIG_FLAG_PARTIAL);
+ } else {
+ dev_dbg(dev, "Requesting full reconfiguration.\n");
+ }
+
+ reinit_completion(&priv->status_return_completion);
+ ret = s10_svc_send_msg(priv, COMMAND_RECONFIG,
+ &ctype, sizeof(ctype));
+ if (ret < 0)
+ goto init_done;
+
+ ret = wait_for_completion_timeout(
+ &priv->status_return_completion, S10_RECONFIG_TIMEOUT);
+ if (!ret) {
+ dev_err(dev, "timeout waiting for RECONFIG_REQUEST\n");
+ ret = -ETIMEDOUT;
+ goto init_done;
+ }
+
+ ret = 0;
+ if (!test_and_clear_bit(SVC_STATUS_OK, &priv->status)) {
+ ret = -ETIMEDOUT;
+ goto init_done;
+ }
+
+ /* Allocate buffers from the service layer's pool. */
+ for (i = 0; i < NUM_SVC_BUFS; i++) {
+ kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
+ if (IS_ERR(kbuf)) {
+ s10_free_buffers(mgr);
+ ret = PTR_ERR(kbuf);
+ goto init_done;
+ }
+
+ priv->svc_bufs[i].buf = kbuf;
+ priv->svc_bufs[i].lock = 0;
+ }
+
+init_done:
+ stratix10_svc_done(priv->chan);
+ return ret;
+}
+
+/*
+ * s10_send_buf - send a buffer to the service layer queue
+ * mgr: fpga manager struct
+ * buf: fpga image buffer
+ * count: size of buf in bytes
+ * Returns # of bytes transferred or -ENOBUFS if all the buffers are in use
+ * or if the service queue is full. Never returns 0.
+ */
+static int s10_send_buf(struct fpga_manager *mgr, const char *buf, size_t count)
+{
+ struct s10_priv *priv = mgr->priv;
+ struct device *dev = priv->client.dev;
+ void *svc_buf;
+ size_t xfer_sz;
+ int ret;
+ uint i;
+
+	/* get/lock a buffer that's not being used */
+ for (i = 0; i < NUM_SVC_BUFS; i++)
+ if (!test_and_set_bit_lock(SVC_BUF_LOCK,
+ &priv->svc_bufs[i].lock))
+ break;
+
+ if (i == NUM_SVC_BUFS)
+ return -ENOBUFS;
+
+ xfer_sz = count < SVC_BUF_SIZE ? count : SVC_BUF_SIZE;
+
+ svc_buf = priv->svc_bufs[i].buf;
+ memcpy(svc_buf, buf, xfer_sz);
+ ret = s10_svc_send_msg(priv, COMMAND_RECONFIG_DATA_SUBMIT,
+ svc_buf, xfer_sz);
+ if (ret < 0) {
+ dev_err(dev,
+ "Error while sending data to service layer (%d)", ret);
+ clear_bit_unlock(SVC_BUF_LOCK, &priv->svc_bufs[i].lock);
+ return ret;
+ }
+
+ return xfer_sz;
+}
+
+/*
+ * Send an FPGA image to privileged layers to write to the FPGA. When done
+ * sending, free all service layer buffers we allocated in write_init.
+ */
+static int s10_ops_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct s10_priv *priv = mgr->priv;
+ struct device *dev = priv->client.dev;
+ long wait_status;
+ int sent = 0;
+ int ret = 0;
+
+ /*
+ * Loop waiting for buffers to be returned. When a buffer is returned,
+	 * reuse it to send more data or free it if all data has been sent.
+ */
+ while (count > 0 || s10_free_buffer_count(mgr) != NUM_SVC_BUFS) {
+ reinit_completion(&priv->status_return_completion);
+
+ if (count > 0) {
+ sent = s10_send_buf(mgr, buf, count);
+ if (sent < 0)
+ continue;
+
+ count -= sent;
+ buf += sent;
+ } else {
+ if (s10_free_buffers(mgr))
+ return 0;
+
+ ret = s10_svc_send_msg(
+ priv, COMMAND_RECONFIG_DATA_CLAIM,
+ NULL, 0);
+ if (ret < 0)
+ break;
+ }
+
+ /*
+ * If callback hasn't already happened, wait for buffers to be
+ * returned from service layer
+ */
+ wait_status = 1; /* not timed out */
+ if (!priv->status)
+ wait_status = wait_for_completion_timeout(
+ &priv->status_return_completion,
+ S10_BUFFER_TIMEOUT);
+
+ if (test_and_clear_bit(SVC_STATUS_BUFFER_DONE, &priv->status) ||
+ test_and_clear_bit(SVC_STATUS_BUFFER_SUBMITTED,
+ &priv->status)) {
+ ret = 0;
+ continue;
+ }
+
+ if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+ dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ if (!wait_status) {
+ dev_err(dev, "timeout waiting for svc layer buffers\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ if (!s10_free_buffers(mgr))
+ dev_err(dev, "%s not all buffers were freed\n", __func__);
+
+ return ret;
+}
+
+static int s10_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct s10_priv *priv = mgr->priv;
+ struct device *dev = priv->client.dev;
+ unsigned long timeout;
+ int ret;
+
+ timeout = usecs_to_jiffies(info->config_complete_timeout_us);
+
+ do {
+ reinit_completion(&priv->status_return_completion);
+
+ ret = s10_svc_send_msg(priv, COMMAND_RECONFIG_STATUS, NULL, 0);
+ if (ret < 0)
+ break;
+
+ ret = wait_for_completion_timeout(
+ &priv->status_return_completion, timeout);
+ if (!ret) {
+ dev_err(dev,
+ "timeout waiting for RECONFIG_COMPLETED\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ /* Not error or timeout, so ret is # of jiffies until timeout */
+ timeout = ret;
+ ret = 0;
+
+ if (test_and_clear_bit(SVC_STATUS_COMPLETED, &priv->status))
+ break;
+
+ if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+ dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
+ ret = -EFAULT;
+ break;
+ }
+ } while (1);
+
+ stratix10_svc_done(priv->chan);
+
+ return ret;
+}
+
+static const struct fpga_manager_ops s10_ops = {
+ .write_init = s10_ops_write_init,
+ .write = s10_ops_write,
+ .write_complete = s10_ops_write_complete,
+};
+
+static int s10_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct s10_priv *priv;
+ struct fpga_manager *mgr;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client.dev = dev;
+ priv->client.receive_cb = s10_receive_callback;
+ priv->client.priv = priv;
+
+ priv->chan = stratix10_svc_request_channel_byname(&priv->client,
+ SVC_CLIENT_FPGA);
+ if (IS_ERR(priv->chan)) {
+ dev_err(dev, "couldn't get service channel (%s)\n",
+ SVC_CLIENT_FPGA);
+ return PTR_ERR(priv->chan);
+ }
+
+ init_completion(&priv->status_return_completion);
+
+ mgr = fpga_mgr_register(dev, "Stratix10 SOC FPGA Manager",
+ &s10_ops, priv);
+ if (IS_ERR(mgr)) {
+ dev_err(dev, "unable to register FPGA manager\n");
+ ret = PTR_ERR(mgr);
+ goto probe_err;
+ }
+
+ platform_set_drvdata(pdev, mgr);
+ return 0;
+
+probe_err:
+ stratix10_svc_free_channel(priv->chan);
+ return ret;
+}
+
+static int s10_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+ struct s10_priv *priv = mgr->priv;
+
+ fpga_mgr_unregister(mgr);
+ stratix10_svc_free_channel(priv->chan);
+
+ return 0;
+}
+
+static const struct of_device_id s10_of_match[] = {
+ {.compatible = "intel,stratix10-soc-fpga-mgr"},
+ {.compatible = "intel,agilex-soc-fpga-mgr"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, s10_of_match);
+
+static struct platform_driver s10_driver = {
+ .probe = s10_probe,
+ .remove = s10_remove,
+ .driver = {
+ .name = "Stratix10 SoC FPGA manager",
+ .of_match_table = of_match_ptr(s10_of_match),
+ },
+};
+
+static int __init s10_init(void)
+{
+ struct device_node *fw_np;
+ struct device_node *np;
+ int ret;
+
+ fw_np = of_find_node_by_name(NULL, "svc");
+ if (!fw_np)
+ return -ENODEV;
+
+ of_node_get(fw_np);
+ np = of_find_matching_node(fw_np, s10_of_match);
+ if (!np) {
+ of_node_put(fw_np);
+ return -ENODEV;
+ }
+
+ of_node_put(np);
+ ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
+ of_node_put(fw_np);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&s10_driver);
+}
+
+static void __exit s10_exit(void)
+{
+	platform_driver_unregister(&s10_driver);
+}
+
+module_init(s10_init);
+module_exit(s10_exit);
+
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
+MODULE_DESCRIPTION("Intel Stratix 10 SOC FPGA Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/tests/.kunitconfig b/drivers/fpga/tests/.kunitconfig
new file mode 100644
index 0000000000..a1c2a2974c
--- /dev/null
+++ b/drivers/fpga/tests/.kunitconfig
@@ -0,0 +1,5 @@
+CONFIG_KUNIT=y
+CONFIG_FPGA=y
+CONFIG_FPGA_REGION=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_FPGA_KUNIT_TESTS=y
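+#
+# One way to run these tests from the kernel top-level directory (sketch;
+# assumes the KUnit wrapper script in tools/testing/kunit):
+#   ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/fpga/tests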
diff --git a/drivers/fpga/tests/Kconfig b/drivers/fpga/tests/Kconfig
new file mode 100644
index 0000000000..d4e55204c0
--- /dev/null
+++ b/drivers/fpga/tests/Kconfig
@@ -0,0 +1,11 @@
+config FPGA_KUNIT_TESTS
+ bool "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS
+ depends on FPGA=y && FPGA_REGION=y && FPGA_BRIDGE=y && KUNIT=y && MODULES=n
+ default KUNIT_ALL_TESTS
+ help
+	  This builds unit tests for the FPGA subsystem.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
diff --git a/drivers/fpga/tests/Makefile b/drivers/fpga/tests/Makefile
new file mode 100644
index 0000000000..bb78215c64
--- /dev/null
+++ b/drivers/fpga/tests/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for KUnit test suites for the FPGA subsystem
+#
+
+obj-$(CONFIG_FPGA_KUNIT_TESTS) += fpga-mgr-test.o fpga-bridge-test.o fpga-region-test.o
diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c
new file mode 100644
index 0000000000..1d258002cd
--- /dev/null
+++ b/drivers/fpga/tests/fpga-bridge-test.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the FPGA Bridge
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+struct bridge_stats {
+ bool enable;
+};
+
+struct bridge_ctx {
+ struct fpga_bridge *bridge;
+ struct platform_device *pdev;
+ struct bridge_stats stats;
+};
+
+static int op_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ struct bridge_stats *stats = bridge->priv;
+
+ stats->enable = enable;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA bridge that implements only the enable_set op to track
+ * the state.
+ */
+static const struct fpga_bridge_ops fake_bridge_ops = {
+ .enable_set = op_enable_set,
+};
+
+/**
+ * register_test_bridge() - Register a fake FPGA bridge for testing.
+ * @test: KUnit test context object.
+ *
+ * Return: Context of the newly registered FPGA bridge.
+ */
+static struct bridge_ctx *register_test_bridge(struct kunit *test)
+{
+ struct bridge_ctx *ctx;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->pdev = platform_device_register_simple("bridge_pdev", PLATFORM_DEVID_AUTO, NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->pdev);
+
+ ctx->bridge = fpga_bridge_register(&ctx->pdev->dev, "Fake FPGA bridge", &fake_bridge_ops,
+ &ctx->stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge));
+
+ return ctx;
+}
+
+static void unregister_test_bridge(struct bridge_ctx *ctx)
+{
+ fpga_bridge_unregister(ctx->bridge);
+ platform_device_unregister(ctx->pdev);
+}
+
+static void fpga_bridge_test_get(struct kunit *test)
+{
+ struct bridge_ctx *ctx = test->priv;
+ struct fpga_bridge *bridge;
+
+ bridge = fpga_bridge_get(&ctx->pdev->dev, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bridge, ctx->bridge);
+
+ bridge = fpga_bridge_get(&ctx->pdev->dev, NULL);
+ KUNIT_EXPECT_EQ(test, PTR_ERR(bridge), -EBUSY);
+
+ fpga_bridge_put(ctx->bridge);
+}
+
+static void fpga_bridge_test_toggle(struct kunit *test)
+{
+ struct bridge_ctx *ctx = test->priv;
+ int ret;
+
+ ret = fpga_bridge_disable(ctx->bridge);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_FALSE(test, ctx->stats.enable);
+
+ ret = fpga_bridge_enable(ctx->bridge);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, ctx->stats.enable);
+}
+
+/* Test the functions for getting and controlling a list of bridges */
+static void fpga_bridge_test_get_put_list(struct kunit *test)
+{
+ struct list_head bridge_list;
+ struct bridge_ctx *ctx_0, *ctx_1;
+ int ret;
+
+ ctx_0 = test->priv;
+ ctx_1 = register_test_bridge(test);
+
+ INIT_LIST_HEAD(&bridge_list);
+
+ /* Get bridge 0 and add it to the list */
+ ret = fpga_bridge_get_to_list(&ctx_0->pdev->dev, NULL, &bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ctx_0->bridge,
+ list_first_entry_or_null(&bridge_list, struct fpga_bridge, node));
+
+ /* Get bridge 1 and add it to the list */
+ ret = fpga_bridge_get_to_list(&ctx_1->pdev->dev, NULL, &bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ctx_1->bridge,
+ list_first_entry_or_null(&bridge_list, struct fpga_bridge, node));
+
+	/* Disable and then enable both bridges from the list */
+ ret = fpga_bridges_disable(&bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_FALSE(test, ctx_0->stats.enable);
+ KUNIT_EXPECT_FALSE(test, ctx_1->stats.enable);
+
+ ret = fpga_bridges_enable(&bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test, ctx_0->stats.enable);
+ KUNIT_EXPECT_TRUE(test, ctx_1->stats.enable);
+
+ /* Put and remove both bridges from the list */
+ fpga_bridges_put(&bridge_list);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&bridge_list));
+
+ unregister_test_bridge(ctx_1);
+}
+
+static int fpga_bridge_test_init(struct kunit *test)
+{
+ test->priv = register_test_bridge(test);
+
+ return 0;
+}
+
+static void fpga_bridge_test_exit(struct kunit *test)
+{
+ unregister_test_bridge(test->priv);
+}
+
+static struct kunit_case fpga_bridge_test_cases[] = {
+ KUNIT_CASE(fpga_bridge_test_get),
+ KUNIT_CASE(fpga_bridge_test_toggle),
+ KUNIT_CASE(fpga_bridge_test_get_put_list),
+ {}
+};
+
+static struct kunit_suite fpga_bridge_suite = {
+ .name = "fpga_bridge",
+ .init = fpga_bridge_test_init,
+ .exit = fpga_bridge_test_exit,
+ .test_cases = fpga_bridge_test_cases,
+};
+
+kunit_test_suite(fpga_bridge_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c
new file mode 100644
index 0000000000..6acec55b60
--- /dev/null
+++ b/drivers/fpga/tests/fpga-mgr-test.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the FPGA Manager
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#define HEADER_FILL 'H'
+#define IMAGE_FILL 'P'
+#define IMAGE_BLOCK 1024
+
+#define HEADER_SIZE IMAGE_BLOCK
+#define IMAGE_SIZE (IMAGE_BLOCK * 4)
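+
+/*
+ * Resulting test image layout: one 1 KiB block filled with 'H' (the fake
+ * header) followed by three 1 KiB blocks filled with 'P' (the payload).
+ */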
+
+struct mgr_stats {
+ bool header_match;
+ bool image_match;
+ u32 seq_num;
+ u32 op_parse_header_seq;
+ u32 op_write_init_seq;
+ u32 op_write_seq;
+ u32 op_write_sg_seq;
+ u32 op_write_complete_seq;
+ enum fpga_mgr_states op_parse_header_state;
+ enum fpga_mgr_states op_write_init_state;
+ enum fpga_mgr_states op_write_state;
+ enum fpga_mgr_states op_write_sg_state;
+ enum fpga_mgr_states op_write_complete_state;
+};
+
+struct mgr_ctx {
+ struct fpga_image_info *img_info;
+ struct fpga_manager *mgr;
+ struct platform_device *pdev;
+ struct mgr_stats stats;
+};
+
+/**
+ * init_test_buffer() - Allocate and initialize a test image in a buffer.
+ * @test: KUnit test context object.
+ * @count: image size in bytes.
+ *
+ * Return: pointer to the newly allocated image.
+ */
+static char *init_test_buffer(struct kunit *test, size_t count)
+{
+ char *buf;
+
+ KUNIT_ASSERT_GE(test, count, HEADER_SIZE);
+
+ buf = kunit_kzalloc(test, count, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ memset(buf, HEADER_FILL, HEADER_SIZE);
+ memset(buf + HEADER_SIZE, IMAGE_FILL, count - HEADER_SIZE);
+
+ return buf;
+}
+
+/*
+ * Check the image header. Do not return an error code if the image check fails
+ * since, in this case, it is a failure of the FPGA manager itself, not this
+ * op that tests it.
+ */
+static int op_parse_header(struct fpga_manager *mgr, struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+ size_t i;
+
+ stats->op_parse_header_state = mgr->state;
+ stats->op_parse_header_seq = stats->seq_num++;
+
+ /* Set header_size and data_size for later */
+ info->header_size = HEADER_SIZE;
+ info->data_size = info->count - HEADER_SIZE;
+
+ stats->header_match = true;
+ for (i = 0; i < info->header_size; i++) {
+ if (buf[i] != HEADER_FILL) {
+ stats->header_match = false;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int op_write_init(struct fpga_manager *mgr, struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+
+ stats->op_write_init_state = mgr->state;
+ stats->op_write_init_seq = stats->seq_num++;
+
+ return 0;
+}
+
+/*
+ * Check the image data. As with op_parse_header, do not return an error code
+ * if the image check fails.
+ */
+static int op_write(struct fpga_manager *mgr, const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+ size_t i;
+
+ stats->op_write_state = mgr->state;
+ stats->op_write_seq = stats->seq_num++;
+
+ stats->image_match = true;
+ for (i = 0; i < count; i++) {
+ if (buf[i] != IMAGE_FILL) {
+ stats->image_match = false;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Check the image data, but first skip the header since write_sg will get
+ * the whole image in sg_table. As with op_parse_header, do not return an
+ * error code if the image check fails.
+ */
+static int op_write_sg(struct fpga_manager *mgr, struct sg_table *sgt)
+{
+ struct mgr_stats *stats = mgr->priv;
+ struct sg_mapping_iter miter;
+ char *img;
+ size_t i;
+
+ stats->op_write_sg_state = mgr->state;
+ stats->op_write_sg_seq = stats->seq_num++;
+
+ stats->image_match = true;
+ sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+
+ if (!sg_miter_skip(&miter, HEADER_SIZE)) {
+ stats->image_match = false;
+ goto out;
+ }
+
+ while (sg_miter_next(&miter)) {
+ img = miter.addr;
+ for (i = 0; i < miter.length; i++) {
+ if (img[i] != IMAGE_FILL) {
+ stats->image_match = false;
+ goto out;
+ }
+ }
+ }
+out:
+ sg_miter_stop(&miter);
+ return 0;
+}
+
+static int op_write_complete(struct fpga_manager *mgr, struct fpga_image_info *info)
+{
+ struct mgr_stats *stats = mgr->priv;
+
+ stats->op_write_complete_state = mgr->state;
+ stats->op_write_complete_seq = stats->seq_num++;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA manager that implements all ops required to check the programming
+ * sequence using a single contiguous buffer and a scatter gather table.
+ */
+static const struct fpga_manager_ops fake_mgr_ops = {
+ .skip_header = true,
+ .parse_header = op_parse_header,
+ .write_init = op_write_init,
+ .write = op_write,
+ .write_sg = op_write_sg,
+ .write_complete = op_write_complete,
+};
+
+static void fpga_mgr_test_get(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ struct fpga_manager *mgr;
+
+ mgr = fpga_mgr_get(&ctx->pdev->dev);
+ KUNIT_EXPECT_PTR_EQ(test, mgr, ctx->mgr);
+
+ fpga_mgr_put(ctx->mgr);
+}
+
+static void fpga_mgr_test_lock(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ int ret;
+
+ ret = fpga_mgr_lock(ctx->mgr);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = fpga_mgr_lock(ctx->mgr);
+ KUNIT_EXPECT_EQ(test, ret, -EBUSY);
+
+ fpga_mgr_unlock(ctx->mgr);
+}
+
+/* Check the programming sequence using an image in a buffer */
+static void fpga_mgr_test_img_load_buf(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ char *img_buf;
+ int ret;
+
+ img_buf = init_test_buffer(test, IMAGE_SIZE);
+
+ ctx->img_info->count = IMAGE_SIZE;
+ ctx->img_info->buf = img_buf;
+
+ ret = fpga_mgr_load(ctx->mgr, ctx->img_info);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test, ctx->stats.header_match);
+ KUNIT_EXPECT_TRUE(test, ctx->stats.image_match);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_parse_header_state, FPGA_MGR_STATE_PARSE_HEADER);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_state, FPGA_MGR_STATE_WRITE_INIT);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_state, FPGA_MGR_STATE_WRITE);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_state, FPGA_MGR_STATE_WRITE_COMPLETE);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_seq, ctx->stats.op_parse_header_seq + 2);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3);
+}
+
+/* Check the programming sequence using an image in a scatter gather table */
+static void fpga_mgr_test_img_load_sgt(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ struct sg_table *sgt;
+ char *img_buf;
+ int ret;
+
+ img_buf = init_test_buffer(test, IMAGE_SIZE);
+
+	sgt = kunit_kzalloc(test, sizeof(*sgt), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE);
+
+ ctx->img_info->sgt = sgt;
+
+ ret = fpga_mgr_load(ctx->mgr, ctx->img_info);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test, ctx->stats.header_match);
+ KUNIT_EXPECT_TRUE(test, ctx->stats.image_match);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_parse_header_state, FPGA_MGR_STATE_PARSE_HEADER);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_state, FPGA_MGR_STATE_WRITE_INIT);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_state, FPGA_MGR_STATE_WRITE);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_state, FPGA_MGR_STATE_WRITE_COMPLETE);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_seq, ctx->stats.op_parse_header_seq + 2);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3);
+
+ sg_free_table(ctx->img_info->sgt);
+}
+
+static int fpga_mgr_test_init(struct kunit *test)
+{
+ struct mgr_ctx *ctx;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->pdev = platform_device_register_simple("mgr_pdev", PLATFORM_DEVID_AUTO, NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->pdev);
+
+ ctx->mgr = devm_fpga_mgr_register(&ctx->pdev->dev, "Fake FPGA Manager", &fake_mgr_ops,
+ &ctx->stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr));
+
+ ctx->img_info = fpga_image_info_alloc(&ctx->pdev->dev);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->img_info);
+
+ test->priv = ctx;
+
+ return 0;
+}
+
+static void fpga_mgr_test_exit(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+
+ fpga_image_info_free(ctx->img_info);
+ platform_device_unregister(ctx->pdev);
+}
+
+static struct kunit_case fpga_mgr_test_cases[] = {
+ KUNIT_CASE(fpga_mgr_test_get),
+ KUNIT_CASE(fpga_mgr_test_lock),
+ KUNIT_CASE(fpga_mgr_test_img_load_buf),
+ KUNIT_CASE(fpga_mgr_test_img_load_sgt),
+ {}
+};
+
+static struct kunit_suite fpga_mgr_suite = {
+ .name = "fpga_mgr",
+ .init = fpga_mgr_test_init,
+ .exit = fpga_mgr_test_exit,
+ .test_cases = fpga_mgr_test_cases,
+};
+
+kunit_test_suite(fpga_mgr_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c
new file mode 100644
index 0000000000..baab07e3fc
--- /dev/null
+++ b/drivers/fpga/tests/fpga-region-test.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the FPGA Region
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+struct mgr_stats {
+ u32 write_count;
+};
+
+struct bridge_stats {
+ bool enable;
+ u32 cycles_count;
+};
+
+struct test_ctx {
+ struct fpga_manager *mgr;
+ struct platform_device *mgr_pdev;
+ struct fpga_bridge *bridge;
+ struct platform_device *bridge_pdev;
+ struct fpga_region *region;
+ struct platform_device *region_pdev;
+ struct bridge_stats bridge_stats;
+ struct mgr_stats mgr_stats;
+};
+
+static int op_write(struct fpga_manager *mgr, const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+
+ stats->write_count++;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA manager that implements only the write op to count the number
+ * of programming cycles. The internals of the programming sequence are
+ * tested in the Manager suite since they are outside the responsibility
+ * of the Region.
+ */
+static const struct fpga_manager_ops fake_mgr_ops = {
+ .write = op_write,
+};
+
+static int op_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ struct bridge_stats *stats = bridge->priv;
+
+ if (!stats->enable && enable)
+ stats->cycles_count++;
+
+ stats->enable = enable;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA bridge that implements only the enable_set op to count the number
+ * of activation cycles.
+ */
+static const struct fpga_bridge_ops fake_bridge_ops = {
+ .enable_set = op_enable_set,
+};
+
+static int fake_region_get_bridges(struct fpga_region *region)
+{
+ struct fpga_bridge *bridge = region->priv;
+
+ return fpga_bridge_get_to_list(bridge->dev.parent, region->info, &region->bridge_list);
+}
+
+static int fake_region_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+static void fpga_region_test_class_find(struct kunit *test)
+{
+ struct test_ctx *ctx = test->priv;
+ struct fpga_region *region;
+
+ region = fpga_region_class_find(NULL, &ctx->region_pdev->dev, fake_region_match);
+ KUNIT_EXPECT_PTR_EQ(test, region, ctx->region);
+
+ put_device(&region->dev);
+}
+
+/*
+ * FPGA Region programming test. The Region must call get_bridges() to get
+ * and control the bridges, and then the Manager for the actual programming.
+ */
+static void fpga_region_test_program_fpga(struct kunit *test)
+{
+ struct test_ctx *ctx = test->priv;
+ struct fpga_image_info *img_info;
+ char img_buf[4];
+ int ret;
+
+ img_info = fpga_image_info_alloc(&ctx->mgr_pdev->dev);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, img_info);
+
+ img_info->buf = img_buf;
+ img_info->count = sizeof(img_buf);
+
+ ctx->region->info = img_info;
+ ret = fpga_region_program_fpga(ctx->region);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, 1, ctx->mgr_stats.write_count);
+ KUNIT_EXPECT_EQ(test, 1, ctx->bridge_stats.cycles_count);
+
+ fpga_bridges_put(&ctx->region->bridge_list);
+
+ ret = fpga_region_program_fpga(ctx->region);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, 2, ctx->mgr_stats.write_count);
+ KUNIT_EXPECT_EQ(test, 2, ctx->bridge_stats.cycles_count);
+
+ fpga_bridges_put(&ctx->region->bridge_list);
+
+ fpga_image_info_free(img_info);
+}
+
+/*
+ * The configuration used in this test suite uses a single bridge to
+ * limit the code under test to a single unit. The functions used by the
+ * Region for getting and controlling bridges are tested (with a list of
+ * multiple bridges) in the Bridge suite.
+ */
+static int fpga_region_test_init(struct kunit *test)
+{
+ struct test_ctx *ctx;
+ struct fpga_region_info region_info = { 0 };
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->mgr_pdev = platform_device_register_simple("mgr_pdev", PLATFORM_DEVID_AUTO, NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->mgr_pdev);
+
+ ctx->mgr = devm_fpga_mgr_register(&ctx->mgr_pdev->dev, "Fake FPGA Manager", &fake_mgr_ops,
+ &ctx->mgr_stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr));
+
+ ctx->bridge_pdev = platform_device_register_simple("bridge_pdev", PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->bridge_pdev);
+
+ ctx->bridge = fpga_bridge_register(&ctx->bridge_pdev->dev, "Fake FPGA Bridge",
+ &fake_bridge_ops, &ctx->bridge_stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge));
+
+ ctx->bridge_stats.enable = true;
+
+ ctx->region_pdev = platform_device_register_simple("region_pdev", PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->region_pdev);
+
+ region_info.mgr = ctx->mgr;
+ region_info.priv = ctx->bridge;
+ region_info.get_bridges = fake_region_get_bridges;
+
+ ctx->region = fpga_region_register_full(&ctx->region_pdev->dev, &region_info);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->region));
+
+ test->priv = ctx;
+
+ return 0;
+}
+
+static void fpga_region_test_exit(struct kunit *test)
+{
+ struct test_ctx *ctx = test->priv;
+
+ fpga_region_unregister(ctx->region);
+ platform_device_unregister(ctx->region_pdev);
+
+ fpga_bridge_unregister(ctx->bridge);
+ platform_device_unregister(ctx->bridge_pdev);
+
+ platform_device_unregister(ctx->mgr_pdev);
+}
+
+static struct kunit_case fpga_region_test_cases[] = {
+ KUNIT_CASE(fpga_region_test_class_find),
+ KUNIT_CASE(fpga_region_test_program_fpga),
+
+ {}
+};
+
+static struct kunit_suite fpga_region_suite = {
+ .name = "fpga_mgr",
+ .init = fpga_region_test_init,
+ .exit = fpga_region_test_exit,
+ .test_cases = fpga_region_test_cases,
+};
+
+kunit_test_suite(fpga_region_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
new file mode 100644
index 0000000000..4e1d2a4d3d
--- /dev/null
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Technologic Systems TS-73xx SBC FPGA loader
+ *
+ * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * FPGA Manager Driver for the on-board Altera Cyclone II FPGA found on
+ * TS-7300, heavily based on load_fpga.c in their vendor tree.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/iopoll.h>
+#include <linux/fpga/fpga-mgr.h>
+
+#define TS73XX_FPGA_DATA_REG 0
+#define TS73XX_FPGA_CONFIG_REG 1
+
+#define TS73XX_FPGA_WRITE_DONE 0x1
+#define TS73XX_FPGA_WRITE_DONE_TIMEOUT 1000 /* us */
+#define TS73XX_FPGA_RESET 0x2
+#define TS73XX_FPGA_RESET_LOW_DELAY 30 /* us */
+#define TS73XX_FPGA_RESET_HIGH_DELAY 80 /* us */
+#define TS73XX_FPGA_LOAD_OK 0x4
+#define TS73XX_FPGA_CONFIG_LOAD 0x8
+
+struct ts73xx_fpga_priv {
+ void __iomem *io_base;
+ struct device *dev;
+};
+
+static int ts73xx_fpga_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct ts73xx_fpga_priv *priv = mgr->priv;
+
+ /* Reset the FPGA */
+ writeb(0, priv->io_base + TS73XX_FPGA_CONFIG_REG);
+ udelay(TS73XX_FPGA_RESET_LOW_DELAY);
+ writeb(TS73XX_FPGA_RESET, priv->io_base + TS73XX_FPGA_CONFIG_REG);
+ udelay(TS73XX_FPGA_RESET_HIGH_DELAY);
+
+ return 0;
+}
+
+static int ts73xx_fpga_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct ts73xx_fpga_priv *priv = mgr->priv;
+ size_t i = 0;
+ int ret;
+ u8 reg;
+
+ while (count--) {
+ ret = readb_poll_timeout(priv->io_base + TS73XX_FPGA_CONFIG_REG,
+ reg, !(reg & TS73XX_FPGA_WRITE_DONE),
+ 1, TS73XX_FPGA_WRITE_DONE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ writeb(buf[i], priv->io_base + TS73XX_FPGA_DATA_REG);
+ i++;
+ }
+
+ return 0;
+}
+
+static int ts73xx_fpga_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct ts73xx_fpga_priv *priv = mgr->priv;
+ u8 reg;
+
+ usleep_range(1000, 2000);
+ reg = readb(priv->io_base + TS73XX_FPGA_CONFIG_REG);
+ reg |= TS73XX_FPGA_CONFIG_LOAD;
+ writeb(reg, priv->io_base + TS73XX_FPGA_CONFIG_REG);
+
+ usleep_range(1000, 2000);
+ reg = readb(priv->io_base + TS73XX_FPGA_CONFIG_REG);
+ reg &= ~TS73XX_FPGA_CONFIG_LOAD;
+ writeb(reg, priv->io_base + TS73XX_FPGA_CONFIG_REG);
+
+ reg = readb(priv->io_base + TS73XX_FPGA_CONFIG_REG);
+ if ((reg & TS73XX_FPGA_LOAD_OK) != TS73XX_FPGA_LOAD_OK)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static const struct fpga_manager_ops ts73xx_fpga_ops = {
+ .write_init = ts73xx_fpga_write_init,
+ .write = ts73xx_fpga_write,
+ .write_complete = ts73xx_fpga_write_complete,
+};
+
+static int ts73xx_fpga_probe(struct platform_device *pdev)
+{
+ struct device *kdev = &pdev->dev;
+ struct ts73xx_fpga_priv *priv;
+ struct fpga_manager *mgr;
+
+ priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = kdev;
+
+ priv->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->io_base))
+ return PTR_ERR(priv->io_base);
+
+ mgr = devm_fpga_mgr_register(kdev, "TS-73xx FPGA Manager",
+ &ts73xx_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+static struct platform_driver ts73xx_fpga_driver = {
+ .driver = {
+ .name = "ts73xx-fpga-mgr",
+ },
+ .probe = ts73xx_fpga_probe,
+};
+module_platform_driver(ts73xx_fpga_driver);
+
+MODULE_AUTHOR("Florian Fainelli <f.fainelli@gmail.com>");
+MODULE_DESCRIPTION("TS-73xx FPGA Manager driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c
new file mode 100644
index 0000000000..e1601b3a34
--- /dev/null
+++ b/drivers/fpga/versal-fpga.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2021 Xilinx, Inc.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+static int versal_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t size)
+{
+ return 0;
+}
+
+static int versal_fpga_ops_write(struct fpga_manager *mgr,
+ const char *buf, size_t size)
+{
+ dma_addr_t dma_addr = 0;
+ char *kbuf;
+ int ret;
+
+ kbuf = dma_alloc_coherent(mgr->dev.parent, size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ memcpy(kbuf, buf, size);
+ ret = zynqmp_pm_load_pdi(PDI_SRC_DDR, dma_addr);
+ dma_free_coherent(mgr->dev.parent, size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static const struct fpga_manager_ops versal_fpga_ops = {
+ .write_init = versal_fpga_ops_write_init,
+ .write = versal_fpga_ops_write,
+};
+
+static int versal_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fpga_manager *mgr;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret < 0) {
+ dev_err(dev, "no usable DMA configuration\n");
+ return ret;
+ }
+
+ mgr = devm_fpga_mgr_register(dev, "Xilinx Versal FPGA Manager",
+ &versal_fpga_ops, NULL);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+static const struct of_device_id versal_fpga_of_match[] = {
+ { .compatible = "xlnx,versal-fpga", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, versal_fpga_of_match);
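+
+/*
+ * Minimal device tree sketch for this manager (node name illustrative;
+ * the node needs no reg or clock properties since the actual programming
+ * is delegated to firmware):
+ *
+ *	versal_fpga: versal-fpga {
+ *		compatible = "xlnx,versal-fpga";
+ *	};
+ */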
+
+static struct platform_driver versal_fpga_driver = {
+ .probe = versal_fpga_probe,
+ .driver = {
+ .name = "versal_fpga_manager",
+ .of_match_table = of_match_ptr(versal_fpga_of_match),
+ },
+};
+module_platform_driver(versal_fpga_driver);
+
+MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>");
+MODULE_AUTHOR("Appana Durga Kedareswara rao <appanad.durga.rao@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx Versal FPGA Manager");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
new file mode 100644
index 0000000000..208d9560f5
--- /dev/null
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017, National Instruments Corp.
+ * Copyright (c) 2017, Xilinx Inc
+ *
+ * FPGA Bridge Driver for the Xilinx LogiCORE Partial Reconfiguration
+ * Decoupler IP Core.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#define CTRL_CMD_DECOUPLE BIT(0)
+#define CTRL_CMD_COUPLE 0
+#define CTRL_OFFSET 0
+
+struct xlnx_config_data {
+ const char *name;
+};
+
+struct xlnx_pr_decoupler_data {
+ const struct xlnx_config_data *ipconfig;
+ void __iomem *io_base;
+ struct clk *clk;
+};
+
+static inline void xlnx_pr_decoupler_write(struct xlnx_pr_decoupler_data *d,
+ u32 offset, u32 val)
+{
+ writel(val, d->io_base + offset);
+}
+
+static inline u32 xlnx_pr_decouple_read(const struct xlnx_pr_decoupler_data *d,
+ u32 offset)
+{
+ return readl(d->io_base + offset);
+}
+
+static int xlnx_pr_decoupler_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ int err;
+ struct xlnx_pr_decoupler_data *priv = bridge->priv;
+
+ err = clk_enable(priv->clk);
+ if (err)
+ return err;
+
+ if (enable)
+ xlnx_pr_decoupler_write(priv, CTRL_OFFSET, CTRL_CMD_COUPLE);
+ else
+ xlnx_pr_decoupler_write(priv, CTRL_OFFSET, CTRL_CMD_DECOUPLE);
+
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+static int xlnx_pr_decoupler_enable_show(struct fpga_bridge *bridge)
+{
+ const struct xlnx_pr_decoupler_data *priv = bridge->priv;
+ u32 status;
+ int err;
+
+ err = clk_enable(priv->clk);
+ if (err)
+ return err;
+
+ status = xlnx_pr_decouple_read(priv, CTRL_OFFSET);
+
+ clk_disable(priv->clk);
+
+ return !status;
+}
+
+static const struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
+ .enable_set = xlnx_pr_decoupler_enable_set,
+ .enable_show = xlnx_pr_decoupler_enable_show,
+};
+
+#ifdef CONFIG_OF
+static const struct xlnx_config_data decoupler_config = {
+ .name = "Xilinx PR Decoupler",
+};
+
+static const struct xlnx_config_data shutdown_config = {
+ .name = "Xilinx DFX AXI Shutdown Manager",
+};
+
+static const struct of_device_id xlnx_pr_decoupler_of_match[] = {
+ { .compatible = "xlnx,pr-decoupler-1.00", .data = &decoupler_config },
+ { .compatible = "xlnx,pr-decoupler", .data = &decoupler_config },
+ { .compatible = "xlnx,dfx-axi-shutdown-manager-1.00",
+ .data = &shutdown_config },
+ { .compatible = "xlnx,dfx-axi-shutdown-manager",
+ .data = &shutdown_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xlnx_pr_decoupler_of_match);
+#endif
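+
+/*
+ * Minimal device tree sketch (unit address, clock phandle and cell values
+ * are illustrative only):
+ *
+ *	fpga-bridge@41200000 {
+ *		compatible = "xlnx,pr-decoupler-1.00", "xlnx,pr-decoupler";
+ *		reg = <0x41200000 0x10>;
+ *		clocks = <&clkc 15>;
+ *		clock-names = "aclk";
+ *	};
+ */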
+
+static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct xlnx_pr_decoupler_data *priv;
+ struct fpga_bridge *br;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (np) {
+ const struct of_device_id *match;
+
+ match = of_match_node(xlnx_pr_decoupler_of_match, np);
+ if (match && match->data)
+ priv->ipconfig = match->data;
+ }
+
+ priv->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->io_base))
+ return PTR_ERR(priv->io_base);
+
+ priv->clk = devm_clk_get(&pdev->dev, "aclk");
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "input clock not found\n");
+
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ dev_err(&pdev->dev, "unable to enable clock\n");
+ return err;
+ }
+
+ clk_disable(priv->clk);
+
+ br = fpga_bridge_register(&pdev->dev, priv->ipconfig->name,
+ &xlnx_pr_decoupler_br_ops, priv);
+ if (IS_ERR(br)) {
+ err = PTR_ERR(br);
+ dev_err(&pdev->dev, "unable to register %s",
+ priv->ipconfig->name);
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, br);
+
+ return 0;
+
+err_clk:
+ clk_unprepare(priv->clk);
+
+ return err;
+}
+
+static int xlnx_pr_decoupler_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *bridge = platform_get_drvdata(pdev);
+ struct xlnx_pr_decoupler_data *p = bridge->priv;
+
+ fpga_bridge_unregister(bridge);
+
+ clk_unprepare(p->clk);
+
+ return 0;
+}
+
+static struct platform_driver xlnx_pr_decoupler_driver = {
+ .probe = xlnx_pr_decoupler_probe,
+ .remove = xlnx_pr_decoupler_remove,
+ .driver = {
+ .name = "xlnx_pr_decoupler",
+ .of_match_table = of_match_ptr(xlnx_pr_decoupler_of_match),
+ },
+};
+
+module_platform_driver(xlnx_pr_decoupler_driver);
+
+MODULE_DESCRIPTION("Xilinx Partial Reconfiguration Decoupler");
+MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");
+MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
new file mode 100644
index 0000000000..e1a227e7ff
--- /dev/null
+++ b/drivers/fpga/xilinx-spi.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Xilinx Spartan6 and 7 Series Slave Serial SPI Driver
+ *
+ * Copyright (C) 2017 DENX Software Engineering
+ *
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * Manage Xilinx FPGA firmware that is loaded over SPI using
+ * the slave serial configuration interface.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+struct xilinx_spi_conf {
+ struct spi_device *spi;
+ struct gpio_desc *prog_b;
+ struct gpio_desc *init_b;
+ struct gpio_desc *done;
+};
+
+static int get_done_gpio(struct fpga_manager *mgr)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+ int ret;
+
+ ret = gpiod_get_value(conf->done);
+
+ if (ret < 0)
+ dev_err(&mgr->dev, "Error reading DONE (%d)\n", ret);
+
+ return ret;
+}
+
+static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr)
+{
+ if (!get_done_gpio(mgr))
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+/**
+ * wait_for_init_b - wait for the INIT_B pin to have a given state, or wait
+ * a given delay if the pin is unavailable
+ *
+ * @mgr: The FPGA manager object
+ * @value: Value INIT_B to wait for (1 = asserted = low)
+ * @alt_udelay: Delay to wait if the INIT_B GPIO is not available
+ *
+ * Returns 0 when the INIT_B GPIO reaches the given state, a negative error
+ * code if reading the GPIO fails, or -ETIMEDOUT if the state is not reached
+ * within one second. If no INIT_B GPIO is available, wait @alt_udelay
+ * microseconds and then return 0.
+ */
+static int wait_for_init_b(struct fpga_manager *mgr, int value,
+ unsigned long alt_udelay)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ if (conf->init_b) {
+ while (time_before(jiffies, timeout)) {
+ int ret = gpiod_get_value(conf->init_b);
+
+ if (ret == value)
+ return 0;
+
+ if (ret < 0) {
+ dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(100, 400);
+ }
+
+ dev_err(&mgr->dev, "Timeout waiting for INIT_B to %s\n",
+ value ? "assert" : "deassert");
+ return -ETIMEDOUT;
+ }
+
+ udelay(alt_udelay);
+
+ return 0;
+}
+
+static int xilinx_spi_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+ int err;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported\n");
+ return -EINVAL;
+ }
+
+ gpiod_set_value(conf->prog_b, 1);
+
+ err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */
+ if (err) {
+ gpiod_set_value(conf->prog_b, 0);
+ return err;
+ }
+
+ gpiod_set_value(conf->prog_b, 0);
+
+ err = wait_for_init_b(mgr, 0, 0);
+ if (err)
+ return err;
+
+ if (get_done_gpio(mgr)) {
+ dev_err(&mgr->dev, "Unexpected DONE pin state...\n");
+ return -EIO;
+ }
+
+ /* program latency */
+ usleep_range(7500, 7600);
+ return 0;
+}
+
+static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+ const char *fw_data = buf;
+ const char *fw_data_end = fw_data + count;
+
+ while (fw_data < fw_data_end) {
+ size_t remaining, stride;
+ int ret;
+
+ remaining = fw_data_end - fw_data;
+ stride = min_t(size_t, remaining, SZ_4K);
+
+ ret = spi_write(conf->spi, fw_data, stride);
+ if (ret) {
+ dev_err(&mgr->dev, "SPI error in firmware write: %d\n",
+ ret);
+ return ret;
+ }
+ fw_data += stride;
+ }
+
+ return 0;
+}
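+
+/* For illustration only: chunking into 4 KiB strides is an implementation
+ * choice here, presumably to stay within per-transfer limits of common SPI
+ * controllers; a single spi_write(conf->spi, buf, count) would be the
+ * unchunked equivalent.
+ */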
+
+static int xilinx_spi_apply_cclk_cycles(struct xilinx_spi_conf *conf)
+{
+ struct spi_device *spi = conf->spi;
+ const u8 din_data[1] = { 0xff };
+ int ret;
+
+ ret = spi_write(conf->spi, din_data, sizeof(din_data));
+ if (ret)
+ dev_err(&spi->dev, "applying CCLK cycles failed: %d\n", ret);
+
+ return ret;
+}
+
+static int xilinx_spi_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+ unsigned long timeout = jiffies + usecs_to_jiffies(info->config_complete_timeout_us);
+ bool expired = false;
+ int done;
+ int ret;
+
+ /*
+ * This loop is carefully written such that if the driver is
+ * scheduled out for more than 'timeout', we still check for DONE
+ * before giving up and we apply 8 extra CCLK cycles in all cases.
+ */
+ while (!expired) {
+ expired = time_after(jiffies, timeout);
+
+ done = get_done_gpio(mgr);
+ if (done < 0)
+ return done;
+
+ ret = xilinx_spi_apply_cclk_cycles(conf);
+ if (ret)
+ return ret;
+
+ if (done)
+ return 0;
+ }
+
+ if (conf->init_b) {
+ ret = gpiod_get_value(conf->init_b);
+
+ if (ret < 0) {
+ dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret);
+ return ret;
+ }
+
+ dev_err(&mgr->dev,
+ ret ? "CRC error or invalid device\n"
+ : "Missing sync word or incomplete bitstream\n");
+ } else {
+ dev_err(&mgr->dev, "Timeout after config data transfer\n");
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct fpga_manager_ops xilinx_spi_ops = {
+ .state = xilinx_spi_state,
+ .write_init = xilinx_spi_write_init,
+ .write = xilinx_spi_write,
+ .write_complete = xilinx_spi_write_complete,
+};
+
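+/* For illustration only: these ops are not called directly; the FPGA
+ * manager core drives them in order (write_init, write, write_complete).
+ * A minimal in-kernel consumer sketch, assuming a device "dev" and a
+ * hypothetical firmware file name (error handling trimmed):
+ *
+ *	struct fpga_image_info *info = fpga_image_info_alloc(dev);
+ *	struct fpga_manager *mgr = fpga_mgr_get(dev);
+ *
+ *	info->firmware_name = devm_kstrdup(dev, "fpga.bin", GFP_KERNEL);
+ *	if (!fpga_mgr_lock(mgr)) {
+ *		fpga_mgr_load(mgr, info);
+ *		fpga_mgr_unlock(mgr);
+ *	}
+ *	fpga_mgr_put(mgr);
+ *	fpga_image_info_free(info);
+ */
+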
+static int xilinx_spi_probe(struct spi_device *spi)
+{
+ struct xilinx_spi_conf *conf;
+ struct fpga_manager *mgr;
+
+ conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ conf->spi = spi;
+
+ /* PROGRAM_B is active low */
+ conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
+ if (IS_ERR(conf->prog_b))
+ return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
+ "Failed to get PROGRAM_B gpio\n");
+
+ conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
+ if (IS_ERR(conf->init_b))
+ return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
+ "Failed to get INIT_B gpio\n");
+
+ conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
+ if (IS_ERR(conf->done))
+ return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
+ "Failed to get DONE gpio\n");
+
+ mgr = devm_fpga_mgr_register(&spi->dev,
+ "Xilinx Slave Serial FPGA Manager",
+ &xilinx_spi_ops, conf);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id xlnx_spi_of_match[] = {
+ { .compatible = "xlnx,fpga-slave-serial", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnx_spi_of_match);
+#endif
+
+static struct spi_driver xilinx_slave_spi_driver = {
+ .driver = {
+ .name = "xlnx-slave-spi",
+ .of_match_table = of_match_ptr(xlnx_spi_of_match),
+ },
+ .probe = xilinx_spi_probe,
+};
+
+module_spi_driver(xilinx_slave_spi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_DESCRIPTION("Load Xilinx FPGA firmware over SPI");
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
new file mode 100644
index 0000000000..96611d424a
--- /dev/null
+++ b/drivers/fpga/zynq-fpga.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2015 Xilinx Inc.
+ * Copyright (c) 2015, National Instruments Corp.
+ *
+ * FPGA Manager Driver for Xilinx Zynq, heavily based on xdevcfg driver
+ * in their vendor tree.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/scatterlist.h>
+
+/* Offsets into SLCR regmap */
+
+/* FPGA Software Reset Control */
+#define SLCR_FPGA_RST_CTRL_OFFSET 0x240
+/* Level Shifters Enable */
+#define SLCR_LVL_SHFTR_EN_OFFSET 0x900
+
+/* Constant Definitions */
+
+/* Control Register */
+#define CTRL_OFFSET 0x00
+/* Lock Register */
+#define LOCK_OFFSET 0x04
+/* Interrupt Status Register */
+#define INT_STS_OFFSET 0x0c
+/* Interrupt Mask Register */
+#define INT_MASK_OFFSET 0x10
+/* Status Register */
+#define STATUS_OFFSET 0x14
+/* DMA Source Address Register */
+#define DMA_SRC_ADDR_OFFSET 0x18
+/* DMA Destination Address Reg */
+#define DMA_DST_ADDR_OFFSET 0x1c
+/* DMA Source Transfer Length */
+#define DMA_SRC_LEN_OFFSET 0x20
+/* DMA Destination Transfer */
+#define DMA_DEST_LEN_OFFSET 0x24
+/* Unlock Register */
+#define UNLOCK_OFFSET 0x34
+/* Misc. Control Register */
+#define MCTRL_OFFSET 0x80
+
+/* Control Register Bit definitions */
+
+/* Signal to reset FPGA */
+#define CTRL_PCFG_PROG_B_MASK BIT(30)
+/* Enable PCAP for PR */
+#define CTRL_PCAP_PR_MASK BIT(27)
+/* Enable PCAP */
+#define CTRL_PCAP_MODE_MASK BIT(26)
+/* Lower rate to allow decrypt on the fly */
+#define CTRL_PCAP_RATE_EN_MASK BIT(25)
+/* System booted in secure mode */
+#define CTRL_SEC_EN_MASK BIT(7)
+
+/* Miscellaneous Control Register bit definitions */
+/* Internal PCAP loopback */
+#define MCTRL_PCAP_LPBK_MASK BIT(4)
+
+/* Status register bit definitions */
+
+/* FPGA init status */
+#define STATUS_DMA_Q_F BIT(31)
+#define STATUS_DMA_Q_E BIT(30)
+#define STATUS_PCFG_INIT_MASK BIT(4)
+
+/* Interrupt Status/Mask Register Bit definitions */
+/* DMA command done */
+#define IXR_DMA_DONE_MASK BIT(13)
+/* DMA and PCAP cmd done */
+#define IXR_D_P_DONE_MASK BIT(12)
+/* FPGA programmed */
+#define IXR_PCFG_DONE_MASK BIT(2)
+#define IXR_ERROR_FLAGS_MASK 0x00F0C860
+#define IXR_ALL_MASK 0xF8F7F87F
+
+/* Miscellaneous constant values */
+
+/* Invalid DMA addr */
+#define DMA_INVALID_ADDRESS GENMASK(31, 0)
+/* Used to unlock the dev */
+#define UNLOCK_MASK 0x757bdf0d
+/* Timeout for polling reset bits */
+#define INIT_POLL_TIMEOUT 2500000
+/* Delay for polling reset bits */
+#define INIT_POLL_DELAY 20
+/* Signal this is the last DMA transfer, wait for the AXI and PCAP before
+ * interrupting
+ */
+#define DMA_SRC_LAST_TRANSFER 1
+/* Timeout for DMA completion */
+#define DMA_TIMEOUT_MS 5000
+
+/* Masks for controlling bits in the SLCR */
+/* Disable all Level shifters */
+#define LVL_SHFTR_DISABLE_ALL_MASK 0x0
+/* Enable Level shifters from PS to PL */
+#define LVL_SHFTR_ENABLE_PS_TO_PL 0xa
+/* Enable Level shifters from PL to PS */
+#define LVL_SHFTR_ENABLE_PL_TO_PS 0xf
+/* Enable global resets */
+#define FPGA_RST_ALL_MASK 0xf
+/* Disable global resets */
+#define FPGA_RST_NONE_MASK 0x0
+
+struct zynq_fpga_priv {
+ int irq;
+ struct clk *clk;
+
+ void __iomem *io_base;
+ struct regmap *slcr;
+
+ spinlock_t dma_lock;
+ unsigned int dma_elm;
+ unsigned int dma_nelms;
+ struct scatterlist *cur_sg;
+
+ struct completion dma_done;
+};
+
+static inline void zynq_fpga_write(struct zynq_fpga_priv *priv, u32 offset,
+ u32 val)
+{
+ writel(val, priv->io_base + offset);
+}
+
+static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
+ u32 offset)
+{
+ return readl(priv->io_base + offset);
+}
+
+#define zynq_fpga_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
+ readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
+ timeout_us)
+
+/* Cause the specified irq mask bits to generate IRQs */
+static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable)
+{
+ zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable);
+}
+
+/* Must be called with dma_lock held */
+static void zynq_step_dma(struct zynq_fpga_priv *priv)
+{
+ u32 addr;
+ u32 len;
+ bool first;
+
+ first = priv->dma_elm == 0;
+ while (priv->cur_sg) {
+ /* Feed the DMA queue until it is full. */
+ if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F)
+ break;
+
+ addr = sg_dma_address(priv->cur_sg);
+ len = sg_dma_len(priv->cur_sg);
+ if (priv->dma_elm + 1 == priv->dma_nelms) {
+ /* The last transfer also waits for the PCAP to finish.
+ * Note this changes the irq_mask to ignore
+ * IXR_DMA_DONE_MASK, which ensures we do not trigger
+ * the completion too early.
+ */
+ addr |= DMA_SRC_LAST_TRANSFER;
+ priv->cur_sg = NULL;
+ } else {
+ priv->cur_sg = sg_next(priv->cur_sg);
+ priv->dma_elm++;
+ }
+
+ zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr);
+ zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS);
+ zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4);
+ zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
+ }
+
+ /* Once the first transfer is queued we can turn on the ISR, future
+ * calls to zynq_step_dma will happen from the ISR context. The
+ * dma_lock spinlock guarantees this handover is done coherently, the
+ * ISR enable is put at the end to avoid another CPU spinning in the
+ * ISR on this lock.
+ */
+ if (first && priv->cur_sg) {
+ zynq_fpga_set_irq(priv,
+ IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
+ } else if (!priv->cur_sg) {
+ /* The last transfer changes to DMA & PCAP mode since we do
+ * not want to continue until everything has been flushed into
+ * the PCAP.
+ */
+ zynq_fpga_set_irq(priv,
+ IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK);
+ }
+}
+
+static irqreturn_t zynq_fpga_isr(int irq, void *data)
+{
+ struct zynq_fpga_priv *priv = data;
+ u32 intr_status;
+
+ /* If anything other than a DMA completion is reported, something went
+ * wrong: stop and hand control back to zynq_fpga_ops_write. Otherwise,
+ * progress the DMA.
+ */
+ spin_lock(&priv->dma_lock);
+ intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
+ if (!(intr_status & IXR_ERROR_FLAGS_MASK) &&
+ (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) {
+ zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK);
+ zynq_step_dma(priv);
+ spin_unlock(&priv->dma_lock);
+ return IRQ_HANDLED;
+ }
+ spin_unlock(&priv->dma_lock);
+
+ zynq_fpga_set_irq(priv, 0);
+ complete(&priv->dma_done);
+
+ return IRQ_HANDLED;
+}
+
+/* Sanity check the proposed bitstream. It must contain the sync word, in
+ * the correct byte order, within the initial header and be dword aligned.
+ * The input is a Xilinx .bin file with every 32 bit quantity swapped.
+ */
+static bool zynq_fpga_has_sync(const u8 *buf, size_t count)
+{
+ for (; count >= 4; buf += 4, count -= 4)
+ if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 &&
+ buf[3] == 0xaa)
+ return true;
+ return false;
+}
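+
+/* For illustration only (not used by the driver): the raw Xilinx sync word
+ * is 0xAA995566, so the byte-swapped .bin carries it as 66 55 99 aa and an
+ * equivalent check on unaligned little-endian 32-bit loads would be:
+ *
+ *	get_unaligned_le32(buf) == 0xAA995566
+ */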
+
+static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct zynq_fpga_priv *priv;
+ u32 ctrl, status;
+ int err;
+
+ priv = mgr->priv;
+
+ err = clk_enable(priv->clk);
+ if (err)
+ return err;
+
+ /* check if the bitstream is encrypted and the system is still secure */
+ if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM) {
+ ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
+ if (!(ctrl & CTRL_SEC_EN_MASK)) {
+ dev_err(&mgr->dev,
+ "System not secure, can't use encrypted bitstreams\n");
+ err = -EINVAL;
+ goto out_err;
+ }
+ }
+
+ /* don't globally reset PL if we're doing partial reconfig */
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ if (!zynq_fpga_has_sync(buf, count)) {
+ dev_err(&mgr->dev,
+ "Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n");
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ /* assert AXI interface resets */
+ regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
+ FPGA_RST_ALL_MASK);
+
+ /* disable all level shifters */
+ regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
+ LVL_SHFTR_DISABLE_ALL_MASK);
+ /* enable level shifters from PS to PL */
+ regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
+ LVL_SHFTR_ENABLE_PS_TO_PL);
+
+ /* create a rising edge on PCFG_INIT. PCFG_INIT follows
+ * PCFG_PROG_B, so we need to poll it after setting PCFG_PROG_B
+ * to make sure the rising edge actually happens.
+ * Note: PCFG_PROG_B is low active, sequence as described in
+ * UG585 v1.10 page 211
+ */
+ ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
+ ctrl |= CTRL_PCFG_PROG_B_MASK;
+
+ zynq_fpga_write(priv, CTRL_OFFSET, ctrl);
+
+ err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
+ status & STATUS_PCFG_INIT_MASK,
+ INIT_POLL_DELAY,
+ INIT_POLL_TIMEOUT);
+ if (err) {
+ dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
+ goto out_err;
+ }
+
+ ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
+ ctrl &= ~CTRL_PCFG_PROG_B_MASK;
+
+ zynq_fpga_write(priv, CTRL_OFFSET, ctrl);
+
+ err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
+ !(status & STATUS_PCFG_INIT_MASK),
+ INIT_POLL_DELAY,
+ INIT_POLL_TIMEOUT);
+ if (err) {
+ dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n");
+ goto out_err;
+ }
+
+ ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
+ ctrl |= CTRL_PCFG_PROG_B_MASK;
+
+ zynq_fpga_write(priv, CTRL_OFFSET, ctrl);
+
+ err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
+ status & STATUS_PCFG_INIT_MASK,
+ INIT_POLL_DELAY,
+ INIT_POLL_TIMEOUT);
+ if (err) {
+ dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
+ goto out_err;
+ }
+ }
+
+ /* set the configuration register with the following options:
+ * - enable PCAP interface
+ * - set throughput for maximum speed (if the bitstream is not encrypted)
+ * - set CPU in user mode
+ */
+ ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
+ if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM)
+ zynq_fpga_write(priv, CTRL_OFFSET,
+ (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
+ | CTRL_PCAP_RATE_EN_MASK | ctrl));
+ else
+ zynq_fpga_write(priv, CTRL_OFFSET,
+ (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
+ | ctrl));
+
+ /* We expect that the command queue is empty right now. */
+ status = zynq_fpga_read(priv, STATUS_OFFSET);
+ if ((status & STATUS_DMA_Q_F) ||
+ (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) {
+ dev_err(&mgr->dev, "DMA command queue not right\n");
+ err = -EBUSY;
+ goto out_err;
+ }
+
+ /* ensure internal PCAP loopback is disabled */
+ ctrl = zynq_fpga_read(priv, MCTRL_OFFSET);
+ zynq_fpga_write(priv, MCTRL_OFFSET, (~MCTRL_PCAP_LPBK_MASK & ctrl));
+
+ clk_disable(priv->clk);
+
+ return 0;
+
+out_err:
+ clk_disable(priv->clk);
+
+ return err;
+}
+
+static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
+{
+ struct zynq_fpga_priv *priv;
+ const char *why;
+ int err;
+ u32 intr_status;
+ unsigned long timeout;
+ unsigned long flags;
+ struct scatterlist *sg;
+ int i;
+
+ priv = mgr->priv;
+
+ /* The hardware can only DMA multiples of 4 bytes, and it requires the
+ * starting addresses to be aligned to 64 bits (UG585 pg 212).
+ */
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ if ((sg->offset % 8) || (sg->length % 4)) {
+ dev_err(&mgr->dev,
+ "Invalid bitstream, chunks must be aligned\n");
+ return -EINVAL;
+ }
+ }
+
+ priv->dma_nelms =
+ dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ if (priv->dma_nelms == 0) {
+ dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
+ return -ENOMEM;
+ }
+
+ /* enable clock */
+ err = clk_enable(priv->clk);
+ if (err)
+ goto out_free;
+
+ zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
+ reinit_completion(&priv->dma_done);
+
+ /* zynq_step_dma will turn on interrupts */
+ spin_lock_irqsave(&priv->dma_lock, flags);
+ priv->dma_elm = 0;
+ priv->cur_sg = sgt->sgl;
+ zynq_step_dma(priv);
+ spin_unlock_irqrestore(&priv->dma_lock, flags);
+
+ timeout = wait_for_completion_timeout(&priv->dma_done,
+ msecs_to_jiffies(DMA_TIMEOUT_MS));
+
+ spin_lock_irqsave(&priv->dma_lock, flags);
+ zynq_fpga_set_irq(priv, 0);
+ priv->cur_sg = NULL;
+ spin_unlock_irqrestore(&priv->dma_lock, flags);
+
+ intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
+ zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
+
+ /* There doesn't seem to be a way to force-cancel the DMA, so if
+ * something went wrong we rely on the hardware having halted the
+ * DMA before we get here. If cancellation were possible, we could
+ * use wait_for_completion_interruptible as well.
+ */
+
+ if (intr_status & IXR_ERROR_FLAGS_MASK) {
+ why = "DMA reported error";
+ err = -EIO;
+ goto out_report;
+ }
+
+ if (priv->cur_sg ||
+ !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
+ if (timeout == 0)
+ why = "DMA timed out";
+ else
+ why = "DMA did not complete";
+ err = -EIO;
+ goto out_report;
+ }
+
+ err = 0;
+ goto out_clk;
+
+out_report:
+ dev_err(&mgr->dev,
+ "%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n",
+ why,
+ intr_status,
+ zynq_fpga_read(priv, CTRL_OFFSET),
+ zynq_fpga_read(priv, LOCK_OFFSET),
+ zynq_fpga_read(priv, INT_MASK_OFFSET),
+ zynq_fpga_read(priv, STATUS_OFFSET),
+ zynq_fpga_read(priv, MCTRL_OFFSET));
+
+out_clk:
+ clk_disable(priv->clk);
+
+out_free:
+ dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ return err;
+}
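+
+/* For illustration only: the FPGA manager core hands this .write_sg op an
+ * already-built sg_table. A minimal sketch of how such a table could be
+ * built from a vmalloc'd firmware buffer, assuming fw_buf and fw_size are
+ * page-aligned (error handling trimmed):
+ *
+ *	struct sg_table sgt;
+ *	struct page **pages;
+ *	size_t i, npages = fw_size >> PAGE_SHIFT;
+ *
+ *	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ *	for (i = 0; i < npages; i++)
+ *		pages[i] = vmalloc_to_page(fw_buf + i * PAGE_SIZE);
+ *	sg_alloc_table_from_pages(&sgt, pages, npages, 0, fw_size,
+ *				  GFP_KERNEL);
+ *	kfree(pages);
+ */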
+
+static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct zynq_fpga_priv *priv = mgr->priv;
+ int err;
+ u32 intr_status;
+
+ err = clk_enable(priv->clk);
+ if (err)
+ return err;
+
+ err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status,
+ intr_status & IXR_PCFG_DONE_MASK,
+ INIT_POLL_DELAY,
+ INIT_POLL_TIMEOUT);
+
+ /* Release 'PR' control back to the ICAP */
+ zynq_fpga_write(priv, CTRL_OFFSET,
+ zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK);
+
+ clk_disable(priv->clk);
+
+ if (err)
+ return err;
+
+ /* for the partial reconfig case we didn't touch the level shifters */
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ /* enable level shifters from PL to PS */
+ regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
+ LVL_SHFTR_ENABLE_PL_TO_PS);
+
+ /* deassert AXI interface resets */
+ regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
+ FPGA_RST_NONE_MASK);
+ }
+
+ return 0;
+}
+
+static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
+{
+ int err;
+ u32 intr_status;
+ struct zynq_fpga_priv *priv;
+
+ priv = mgr->priv;
+
+ err = clk_enable(priv->clk);
+ if (err)
+ return FPGA_MGR_STATE_UNKNOWN;
+
+ intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
+ clk_disable(priv->clk);
+
+ if (intr_status & IXR_PCFG_DONE_MASK)
+ return FPGA_MGR_STATE_OPERATING;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static const struct fpga_manager_ops zynq_fpga_ops = {
+ .initial_header_size = 128,
+ .state = zynq_fpga_ops_state,
+ .write_init = zynq_fpga_ops_write_init,
+ .write_sg = zynq_fpga_ops_write,
+ .write_complete = zynq_fpga_ops_write_complete,
+};
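+
+/* Note: .initial_header_size = 128 asks the FPGA manager core to pass at
+ * least the first 128 bytes of the image to write_init(), which gives
+ * zynq_fpga_ops_write_init() enough data for the sync-word scan.
+ */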
+
+static int zynq_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zynq_fpga_priv *priv;
+ struct fpga_manager *mgr;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ spin_lock_init(&priv->dma_lock);
+
+ priv->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->io_base))
+ return PTR_ERR(priv->io_base);
+
+ priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "syscon");
+ if (IS_ERR(priv->slcr)) {
+ dev_err(dev, "unable to get zynq-slcr regmap\n");
+ return PTR_ERR(priv->slcr);
+ }
+
+ init_completion(&priv->dma_done);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ priv->clk = devm_clk_get(dev, "ref_clk");
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "input clock not found\n");
+
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ dev_err(dev, "unable to enable clock\n");
+ return err;
+ }
+
+ /* unlock the device */
+ zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);
+
+ zynq_fpga_set_irq(priv, 0);
+ zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
+ err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
+ priv);
+ if (err) {
+ dev_err(dev, "unable to request IRQ\n");
+ clk_disable_unprepare(priv->clk);
+ return err;
+ }
+
+ clk_disable(priv->clk);
+
+ mgr = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager",
+ &zynq_fpga_ops, priv);
+ if (IS_ERR(mgr)) {
+ dev_err(dev, "unable to register FPGA manager\n");
+ clk_unprepare(priv->clk);
+ return PTR_ERR(mgr);
+ }
+
+ platform_set_drvdata(pdev, mgr);
+
+ return 0;
+}
+
+static int zynq_fpga_remove(struct platform_device *pdev)
+{
+ struct zynq_fpga_priv *priv;
+ struct fpga_manager *mgr;
+
+ mgr = platform_get_drvdata(pdev);
+ priv = mgr->priv;
+
+ fpga_mgr_unregister(mgr);
+
+ clk_unprepare(priv->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id zynq_fpga_of_match[] = {
+ { .compatible = "xlnx,zynq-devcfg-1.0", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);
+#endif
+
+static struct platform_driver zynq_fpga_driver = {
+ .probe = zynq_fpga_probe,
+ .remove = zynq_fpga_remove,
+ .driver = {
+ .name = "zynq_fpga_manager",
+ .of_match_table = of_match_ptr(zynq_fpga_of_match),
+ },
+};
+
+module_platform_driver(zynq_fpga_driver);
+
+MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
+MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx Zynq FPGA Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
new file mode 100644
index 0000000000..f3434e2c48
--- /dev/null
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/string.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+/* Constant Definitions */
+#define IXR_FPGA_DONE_MASK BIT(3)
+
+/**
+ * struct zynqmp_fpga_priv - Private data structure
+ * @dev: Device data structure
+ * @flags: flags used to identify the bitstream type
+ */
+struct zynqmp_fpga_priv {
+ struct device *dev;
+ u32 flags;
+};
+
+static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t size)
+{
+ struct zynqmp_fpga_priv *priv;
+
+ priv = mgr->priv;
+ priv->flags = info->flags;
+
+ return 0;
+}
+
+static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct zynqmp_fpga_priv *priv;
+ dma_addr_t dma_addr;
+ u32 eemi_flags = 0;
+ char *kbuf;
+ int ret;
+
+ priv = mgr->priv;
+
+ kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ memcpy(kbuf, buf, size);
+
+ wmb(); /* ensure all writes are done before initiating the FW call */
+
+ if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
+ eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
+
+ ret = zynqmp_pm_fpga_load(dma_addr, size, eemi_flags);
+
+ dma_free_coherent(priv->dev, size, kbuf, dma_addr);
+
+ return ret;
+}
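+
+/* For illustration only: zynqmp_pm_fpga_load() hands a physical address to
+ * the platform firmware, which requires a physically contiguous,
+ * device-addressable image; that is why the bitstream is staged through
+ * dma_alloc_coherent() above instead of being passed by its original
+ * buffer address.
+ */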
+
+static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
+{
+ u32 status = 0;
+
+ zynqmp_pm_fpga_get_status(&status);
+ if (status & IXR_FPGA_DONE_MASK)
+ return FPGA_MGR_STATE_OPERATING;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 status;
+ int ret;
+
+ ret = zynqmp_pm_fpga_get_config_status(&status);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", status);
+}
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *zynqmp_fpga_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(zynqmp_fpga);
+
+static const struct fpga_manager_ops zynqmp_fpga_ops = {
+ .state = zynqmp_fpga_ops_state,
+ .write_init = zynqmp_fpga_ops_write_init,
+ .write = zynqmp_fpga_ops_write,
+};
+
+static int zynqmp_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zynqmp_fpga_priv *priv;
+ struct fpga_manager *mgr;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ mgr = devm_fpga_mgr_register(dev, "Xilinx ZynqMP FPGA Manager",
+ &zynqmp_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id zynqmp_fpga_of_match[] = {
+ { .compatible = "xlnx,zynqmp-pcap-fpga", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match);
+#endif
+
+static struct platform_driver zynqmp_fpga_driver = {
+ .probe = zynqmp_fpga_probe,
+ .driver = {
+ .name = "zynqmp_fpga_manager",
+ .of_match_table = of_match_ptr(zynqmp_fpga_of_match),
+ .dev_groups = zynqmp_fpga_groups,
+ },
+};
+
+module_platform_driver(zynqmp_fpga_driver);
+
+MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx ZynqMp FPGA Manager");
+MODULE_LICENSE("GPL");